1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "code_generator_arm.h"
18
19 #include "arch/arm/instruction_set_features_arm.h"
20 #include "art_method.h"
21 #include "entrypoints/quick/quick_entrypoints.h"
22 #include "gc/accounting/card_table.h"
23 #include "intrinsics.h"
24 #include "intrinsics_arm.h"
25 #include "mirror/array-inl.h"
26 #include "mirror/class-inl.h"
27 #include "thread.h"
28 #include "utils/arm/assembler_arm.h"
29 #include "utils/arm/managed_register_arm.h"
30 #include "utils/assembler.h"
31 #include "utils/stack_checks.h"
32
33 namespace art {
34
35 namespace arm {
36
ExpectedPairLayout(Location location)37 static bool ExpectedPairLayout(Location location) {
38 // We expected this for both core and fpu register pairs.
39 return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
40 }
41
// The current ArtMethod* is spilled at [SP, #0] by GenerateFrameEntry.
42 static constexpr int kCurrentMethodStackOffset = 0;
43
44 // We unconditionally allocate R5 to ensure we can do long operations
45 // with baseline.
46 static constexpr Register kCoreSavedRegisterForBaseline = R5;
// PC appears in the callee-save list to mimic Quick's frame layout; on entry
// LR is pushed in its slot instead (see GenerateFrameEntry).
47 static constexpr Register kCoreCalleeSaves[] =
48 { R5, R6, R7, R8, R10, R11, PC };
49 static constexpr SRegister kFpuCalleeSaves[] =
50 { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };
51
52 // D31 cannot be split into two S registers, and the register allocator only works on
53 // S registers. Therefore there is no need to block it.
54 static constexpr DRegister DTMP = D31;
55
// Assembler shorthand for the slow-path classes below: they receive the code
// generator as a `codegen` parameter. Redefined further down for member
// functions that can call GetAssembler() directly.
56 #define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
57 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
58
// Slow path for HNullCheck: calls the runtime to throw a NullPointerException.
59 class NullCheckSlowPathARM : public SlowPathCodeARM {
60  public:
NullCheckSlowPathARM(HNullCheck * instruction)61 explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
62
EmitNativeCode(CodeGenerator * codegen)63 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
64 CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
65 __ Bind(GetEntryLabel());
// No branch back to an exit label: the entrypoint throws, so execution never
// returns here. Live registers need not be saved for the same reason.
66 arm_codegen->InvokeRuntime(
67 QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
68 }
69
70  private:
71 HNullCheck* const instruction_;
72 DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
73 };
74
// Slow path for HDivZeroCheck: calls the runtime to throw an ArithmeticException.
75 class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
76  public:
DivZeroCheckSlowPathARM(HDivZeroCheck * instruction)77 explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
78
EmitNativeCode(CodeGenerator * codegen)79 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
80 CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
81 __ Bind(GetEntryLabel());
// The entrypoint throws; no exit branch or register restore is needed.
82 arm_codegen->InvokeRuntime(
83 QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
84 }
85
86  private:
87 HDivZeroCheck* const instruction_;
88 DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
89 };
90
// Slow path for HSuspendCheck: calls pTestSuspend so the thread can honor a
// pending suspension request, then resumes execution. Unlike the throwing
// slow paths, control returns to compiled code, so live registers are
// preserved across the runtime call.
91 class SuspendCheckSlowPathARM : public SlowPathCodeARM {
92  public:
SuspendCheckSlowPathARM(HSuspendCheck * instruction,HBasicBlock * successor)93 SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
94 : instruction_(instruction), successor_(successor) {}
95
EmitNativeCode(CodeGenerator * codegen)96 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
97 CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
98 __ Bind(GetEntryLabel());
99 SaveLiveRegisters(codegen, instruction_->GetLocations());
100 arm_codegen->InvokeRuntime(
101 QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
102 RestoreLiveRegisters(codegen, instruction_->GetLocations());
// Either fall back to just after the suspend check, or branch straight to
// the successor block when one was supplied.
103 if (successor_ == nullptr) {
104 __ b(GetReturnLabel());
105 } else {
106 __ b(arm_codegen->GetLabelOf(successor_));
107 }
108 }
109
// Only meaningful when no successor was given (see DCHECK).
GetReturnLabel()110 Label* GetReturnLabel() {
111 DCHECK(successor_ == nullptr);
112 return &return_label_;
113 }
114
GetSuccessor() const115 HBasicBlock* GetSuccessor() const {
116 return successor_;
117 }
118
119  private:
120 HSuspendCheck* const instruction_;
121 // If not null, the block to branch to after the suspend check.
122 HBasicBlock* const successor_;
123
124 // If `successor_` is null, the label to branch to after the suspend check.
125 Label return_label_;
126
127 DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
128 };
129
// Slow path for HBoundsCheck: moves the offending index and the array length
// into the runtime calling-convention registers and throws
// ArrayIndexOutOfBoundsException.
130 class BoundsCheckSlowPathARM : public SlowPathCodeARM {
131  public:
BoundsCheckSlowPathARM(HBoundsCheck * instruction,Location index_location,Location length_location)132 BoundsCheckSlowPathARM(HBoundsCheck* instruction,
133 Location index_location,
134 Location length_location)
135 : instruction_(instruction),
136 index_location_(index_location),
137 length_location_(length_location) {}
138
EmitNativeCode(CodeGenerator * codegen)139 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
140 CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
141 __ Bind(GetEntryLabel());
142 // We're moving two locations to locations that could overlap, so we need a parallel
143 // move resolver.
144 InvokeRuntimeCallingConvention calling_convention;
145 codegen->EmitParallelMoves(
146 index_location_,
147 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
148 Primitive::kPrimInt,
149 length_location_,
150 Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
151 Primitive::kPrimInt);
// The entrypoint throws; execution does not return here.
152 arm_codegen->InvokeRuntime(
153 QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
154 }
155
156  private:
157 HBoundsCheck* const instruction_;
158 const Location index_location_;
159 const Location length_location_;
160
161 DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
162 };
163
// Slow path that resolves (and optionally initializes) a class via the
// runtime, used by both HLoadClass and HClinitCheck.
164 class LoadClassSlowPathARM : public SlowPathCodeARM {
165  public:
LoadClassSlowPathARM(HLoadClass * cls,HInstruction * at,uint32_t dex_pc,bool do_clinit)166 LoadClassSlowPathARM(HLoadClass* cls,
167 HInstruction* at,
168 uint32_t dex_pc,
169 bool do_clinit)
170 : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
171 DCHECK(at->IsLoadClass() || at->IsClinitCheck());
172 }
173
EmitNativeCode(CodeGenerator * codegen)174 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
175 LocationSummary* locations = at_->GetLocations();
176
177 CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
178 __ Bind(GetEntryLabel());
179 SaveLiveRegisters(codegen, locations);
180
// Pass the type index in the first runtime-call argument register, then pick
// the entrypoint based on whether <clinit> must also be run.
181 InvokeRuntimeCallingConvention calling_convention;
182 __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
183 int32_t entry_point_offset = do_clinit_
184 ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
185 : QUICK_ENTRY_POINT(pInitializeType);
186 arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
187
188 // Move the class to the desired location.
189 Location out = locations->Out();
// `out` is invalid for HClinitCheck, which produces no value.
190 if (out.IsValid()) {
191 DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
192 arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
193 }
194 RestoreLiveRegisters(codegen, locations);
195 __ b(GetExitLabel());
196 }
197
198  private:
199 // The class this slow path will load.
200 HLoadClass* const cls_;
201
202 // The instruction where this slow path is happening.
203 // (Might be the load class or an initialization check).
204 HInstruction* const at_;
205
206 // The dex PC of `at_`.
207 const uint32_t dex_pc_;
208
209 // Whether to initialize the class.
210 const bool do_clinit_;
211
212 DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
213 };
214
// Slow path for HLoadString: resolves a string via the runtime and moves the
// result (returned in R0) into the instruction's output location.
215 class LoadStringSlowPathARM : public SlowPathCodeARM {
216  public:
LoadStringSlowPathARM(HLoadString * instruction)217 explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
218
EmitNativeCode(CodeGenerator * codegen)219 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
220 LocationSummary* locations = instruction_->GetLocations();
// The output register must not be part of the save/restore set, or the
// restore below would clobber the result.
221 DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
222
223 CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
224 __ Bind(GetEntryLabel());
225 SaveLiveRegisters(codegen, locations);
226
227 InvokeRuntimeCallingConvention calling_convention;
228 __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
229 arm_codegen->InvokeRuntime(
230 QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
231 arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
232
233 RestoreLiveRegisters(codegen, locations);
234 __ b(GetExitLabel());
235 }
236
237  private:
238 HLoadString* const instruction_;
239
240 DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
241 };
242
243 class TypeCheckSlowPathARM : public SlowPathCodeARM {
244 public:
TypeCheckSlowPathARM(HInstruction * instruction,Location class_to_check,Location object_class,uint32_t dex_pc)245 TypeCheckSlowPathARM(HInstruction* instruction,
246 Location class_to_check,
247 Location object_class,
248 uint32_t dex_pc)
249 : instruction_(instruction),
250 class_to_check_(class_to_check),
251 object_class_(object_class),
252 dex_pc_(dex_pc) {}
253
EmitNativeCode(CodeGenerator * codegen)254 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
255 LocationSummary* locations = instruction_->GetLocations();
256 DCHECK(instruction_->IsCheckCast()
257 || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
258
259 CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
260 __ Bind(GetEntryLabel());
261 SaveLiveRegisters(codegen, locations);
262
263 // We're moving two locations to locations that could overlap, so we need a parallel
264 // move resolver.
265 InvokeRuntimeCallingConvention calling_convention;
266 codegen->EmitParallelMoves(
267 class_to_check_,
268 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
269 Primitive::kPrimNot,
270 object_class_,
271 Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
272 Primitive::kPrimNot);
273
274 if (instruction_->IsInstanceOf()) {
275 arm_codegen->InvokeRuntime(
276 QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
277 arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
278 } else {
279 DCHECK(instruction_->IsCheckCast());
280 arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
281 }
282
283 RestoreLiveRegisters(codegen, locations);
284 __ b(GetExitLabel());
285 }
286
287 private:
288 HInstruction* const instruction_;
289 const Location class_to_check_;
290 const Location object_class_;
291 uint32_t dex_pc_;
292
293 DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
294 };
295
// Slow path for HDeoptimize: transfers execution to the interpreter via the
// pDeoptimize entrypoint. The call does not return to compiled code, so no
// register restore or exit branch is emitted.
296 class DeoptimizationSlowPathARM : public SlowPathCodeARM {
297  public:
DeoptimizationSlowPathARM(HInstruction * instruction)298 explicit DeoptimizationSlowPathARM(HInstruction* instruction)
299 : instruction_(instruction) {}
300
EmitNativeCode(CodeGenerator * codegen)301 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
302 __ Bind(GetEntryLabel());
303 SaveLiveRegisters(codegen, instruction_->GetLocations());
304 DCHECK(instruction_->IsDeoptimize());
305 HDeoptimize* deoptimize = instruction_->AsDeoptimize();
306 uint32_t dex_pc = deoptimize->GetDexPc();
307 CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
308 arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
309 }
310
311  private:
312 HInstruction* const instruction_;
313 DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
314 };
315
316 #undef __
317
// Redefine the assembler shorthand for the code generator's own member
// functions below, which call GetAssembler() on `this` rather than through a
// `codegen` parameter.
// NOTE(review): the second #undef is redundant — the macro was already
// undefined above.
318 #undef __
319 #define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
320
// Maps an HIR IfCondition to the corresponding ARM condition code,
// using the signed-comparison encodings (LT/LE/GT/GE).
ARMCondition(IfCondition cond)321 inline Condition ARMCondition(IfCondition cond) {
322 switch (cond) {
323 case kCondEQ: return EQ;
324 case kCondNE: return NE;
325 case kCondLT: return LT;
326 case kCondLE: return LE;
327 case kCondGT: return GT;
328 case kCondGE: return GE;
329 default:
330 LOG(FATAL) << "Unknown if condition";
331 }
332 return EQ; // Unreachable.
333 }
334
// Maps an HIR IfCondition to the ARM condition code of its logical negation
// (e.g. kCondLT -> GE), used to branch on the inverted condition.
ARMOppositeCondition(IfCondition cond)335 inline Condition ARMOppositeCondition(IfCondition cond) {
336 switch (cond) {
337 case kCondEQ: return NE;
338 case kCondNE: return EQ;
339 case kCondLT: return GE;
340 case kCondLE: return GT;
341 case kCondGT: return LE;
342 case kCondGE: return LT;
343 default:
344 LOG(FATAL) << "Unknown if condition";
345 }
346 return EQ; // Unreachable.
347 }
348
DumpCoreRegister(std::ostream & stream,int reg) const349 void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
350 stream << ArmManagedRegister::FromCoreRegister(Register(reg));
351 }
352
DumpFloatingPointRegister(std::ostream & stream,int reg) const353 void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
354 stream << ArmManagedRegister::FromSRegister(SRegister(reg));
355 }
356
// The four helpers below spill/fill a single register to/from the stack slot
// at [SP, #stack_index] and report the slot size consumed (one ARM word).

// Spills core register `reg_id` to the stack.
SaveCoreRegister(size_t stack_index,uint32_t reg_id)357 size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
358 __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
359 return kArmWordSize;
360 }
361
// Reloads core register `reg_id` from the stack.
RestoreCoreRegister(size_t stack_index,uint32_t reg_id)362 size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
363 __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
364 return kArmWordSize;
365 }
366
// Spills single-precision FP register `reg_id` to the stack.
SaveFloatingPointRegister(size_t stack_index,uint32_t reg_id)367 size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
368 __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
369 return kArmWordSize;
370 }
371
// Reloads single-precision FP register `reg_id` from the stack.
RestoreFloatingPointRegister(size_t stack_index,uint32_t reg_id)372 size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
373 __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
374 return kArmWordSize;
375 }
376
// Constructs the ARM code generator, passing the register counts and the
// callee-save masks (built from the constant arrays above) to the base class.
CodeGeneratorARM(HGraph * graph,const ArmInstructionSetFeatures & isa_features,const CompilerOptions & compiler_options)377 CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
378 const ArmInstructionSetFeatures& isa_features,
379 const CompilerOptions& compiler_options)
380 : CodeGenerator(graph,
381 kNumberOfCoreRegisters,
382 kNumberOfSRegisters,
383 kNumberOfRegisterPairs,
384 ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
385 arraysize(kCoreCalleeSaves)),
386 ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
387 arraysize(kFpuCalleeSaves)),
388 compiler_options),
389 block_labels_(graph->GetArena(), 0),
390 location_builder_(graph, this),
391 instruction_visitor_(graph, this),
392 move_resolver_(graph->GetArena(), this),
393 assembler_(true),
394 isa_features_(isa_features) {
395 // Save the PC register to mimic Quick.
396 AddAllocatedRegister(Location::RegisterLocation(PC));
397 }
398
// Picks a free register (or register pair) suitable for `type`, marks it as
// blocked, and returns its Location. Core registers and register pairs share
// underlying state, so allocating one must also block the overlapping other.
AllocateFreeRegister(Primitive::Type type) const399 Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
400 switch (type) {
401 case Primitive::kPrimLong: {
402 size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
403 ArmManagedRegister pair =
404 ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
405 DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
406 DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
407
// Block both halves individually, then refresh the pair-blocking table so
// other pairs sharing these registers become unavailable too.
408 blocked_core_registers_[pair.AsRegisterPairLow()] = true;
409 blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
410 UpdateBlockedPairRegisters();
411 return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
412 }
413
414 case Primitive::kPrimByte:
415 case Primitive::kPrimBoolean:
416 case Primitive::kPrimChar:
417 case Primitive::kPrimShort:
418 case Primitive::kPrimInt:
419 case Primitive::kPrimNot: {
420 int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
421 // Block all register pairs that contain `reg`.
422 for (int i = 0; i < kNumberOfRegisterPairs; i++) {
423 ArmManagedRegister current =
424 ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
425 if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
426 blocked_register_pairs_[i] = true;
427 }
428 }
429 return Location::RegisterLocation(reg);
430 }
431
432 case Primitive::kPrimFloat: {
433 int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
434 return Location::FpuRegisterLocation(reg);
435 }
436
// Doubles need two consecutive S registers starting on an even index so they
// can be addressed as a single D register.
437 case Primitive::kPrimDouble: {
438 int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
439 DCHECK_EQ(reg % 2, 0);
440 return Location::FpuRegisterPairLocation(reg, reg + 1);
441 }
442
443 case Primitive::kPrimVoid:
444 LOG(FATAL) << "Unreachable type " << type;
445 }
446
447 return Location();
448 }
449
// Marks the registers the allocator must never hand out: fixed-purpose
// registers always, plus the callee-saves when compiling baseline code
// (except the one register baseline is allowed to use, R5).
SetupBlockedRegisters(bool is_baseline) const450 void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
451 // Don't allocate the dalvik style register pair passing.
452 blocked_register_pairs_[R1_R2] = true;
453
454 // Stack register, LR and PC are always reserved.
455 blocked_core_registers_[SP] = true;
456 blocked_core_registers_[LR] = true;
457 blocked_core_registers_[PC] = true;
458
459 // Reserve thread register.
460 blocked_core_registers_[TR] = true;
461
462 // Reserve temp register.
463 blocked_core_registers_[IP] = true;
464
465 if (is_baseline) {
466 for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
467 blocked_core_registers_[kCoreCalleeSaves[i]] = true;
468 }
469
// Re-enable the one callee-save baseline relies on for long operations.
470 blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
471
472 for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
473 blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
474 }
475 }
476
// Keep the pair table consistent with the individual blocks set above.
477 UpdateBlockedPairRegisters();
478 }
479
UpdateBlockedPairRegisters() const480 void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
481 for (int i = 0; i < kNumberOfRegisterPairs; i++) {
482 ArmManagedRegister current =
483 ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
484 if (blocked_core_registers_[current.AsRegisterPairLow()]
485 || blocked_core_registers_[current.AsRegisterPairHigh()]) {
486 blocked_register_pairs_[i] = true;
487 }
488 }
489 }
490
// Instruction visitor that emits ARM code; shares the code generator's
// assembler so all output goes into the same buffer.
InstructionCodeGeneratorARM(HGraph * graph,CodeGeneratorARM * codegen)491 InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
492 : HGraphVisitor(graph),
493 assembler_(codegen->GetAssembler()),
494 codegen_(codegen) {}
495
// Computes the core and FP spill masks from the allocated callee-save
// registers, widening them where the save/restore instructions require it.
ComputeSpillMask()496 void CodeGeneratorARM::ComputeSpillMask() {
497 core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
498 // Save one extra register for baseline. Note that on thumb2, there is no easy
499 // instruction to restore just the PC, so this actually helps both baseline
500 // and non-baseline to save and restore at least two registers at entry and exit.
501 core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
502 DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
503 fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
504 // We use vpush and vpop for saving and restoring floating point registers, which take
505 // a SRegister and the number of registers to save/restore after that SRegister. We
506 // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
507 // but in the range.
508 if (fpu_spill_mask_ != 0) {
509 uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
510 uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
// Fill every gap between the lowest and highest spilled S register so the
// mask describes one contiguous vpush/vpop range.
511 for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
512 fpu_spill_mask_ |= (1 << i);
513 }
514 }
515 }
516
// Translates an ARM core register into its DWARF register number for CFI.
DWARFReg(Register reg)517 static dwarf::Reg DWARFReg(Register reg) {
518 return dwarf::Reg::ArmCore(static_cast<int>(reg));
519 }
520
// Translates an ARM FP (S) register into its DWARF register number for CFI.
DWARFReg(SRegister reg)521 static dwarf::Reg DWARFReg(SRegister reg) {
522 return dwarf::Reg::ArmFp(static_cast<int>(reg));
523 }
524
// Emits the method prologue: optional stack-overflow probe, callee-save
// pushes (with CFI bookkeeping), frame allocation, and the spill of the
// current method reference at [SP, #0].
GenerateFrameEntry()525 void CodeGeneratorARM::GenerateFrameEntry() {
526 bool skip_overflow_check =
527 IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
528 DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
529 __ Bind(&frame_entry_label_);
530
531 if (HasEmptyFrame()) {
532 return;
533 }
534
// Probe the address `reserved bytes` below SP; if the stack is exhausted this
// load faults in the guard region, and the recorded PC maps the fault back
// to this method.
535 if (!skip_overflow_check) {
536 __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
537 __ LoadFromOffset(kLoadWord, IP, IP, 0);
538 RecordPcInfo(nullptr, 0);
539 }
540
541 // PC is in the list of callee-save to mimic Quick, but we need to push
542 // LR at entry instead.
543 uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
544 __ PushList(push_mask);
545 __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
546 __ cfi().RelOffsetForMany(DWARFReg(R0), 0, push_mask, kArmWordSize);
547 if (fpu_spill_mask_ != 0) {
// fpu_spill_mask_ is a contiguous range (see ComputeSpillMask), so one vpush
// starting at its lowest register covers everything.
548 SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
549 __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
550 __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
551 __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
552 }
// Allocate the remainder of the frame beyond the register spill area.
553 int adjust = GetFrameSize() - FrameEntrySpillSize();
554 __ AddConstant(SP, -adjust);
555 __ cfi().AdjustCFAOffset(adjust);
// Spill the current method (in R0 on entry) at kCurrentMethodStackOffset.
556 __ StoreToOffset(kStoreWord, R0, SP, 0);
557 }
558
// Emits the method epilogue, mirroring GenerateFrameEntry: deallocate the
// frame, pop FP then core callee-saves, and return. The final PopList
// includes PC (core_spill_mask_ contains it), so popping also returns.
GenerateFrameExit()559 void CodeGeneratorARM::GenerateFrameExit() {
560 if (HasEmptyFrame()) {
561 __ bx(LR);
562 return;
563 }
// RememberState/RestoreState scope the CFI changes to this epilogue so
// unwinding info stays valid for code emitted after it.
564 __ cfi().RememberState();
565 int adjust = GetFrameSize() - FrameEntrySpillSize();
566 __ AddConstant(SP, adjust);
567 __ cfi().AdjustCFAOffset(-adjust);
568 if (fpu_spill_mask_ != 0) {
569 SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
570 __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
// NOTE(review): entry uses kArmWordSize here; both are 4 bytes on ARM, but
// one constant should be used consistently.
571 __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
572 __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
573 }
574 __ PopList(core_spill_mask_);
575 __ cfi().RestoreState();
576 __ cfi().DefCFAOffset(GetFrameSize());
577 }
578
// Binds the label of `block` to the current assembler position.
Bind(HBasicBlock * block)579 void CodeGeneratorARM::Bind(HBasicBlock* block) {
580 __ Bind(GetLabelOf(block));
581 }
582
// Returns the stack slot (single or double, by type width) backing the local
// read by `load`. Sub-word types are unexpected here: locals are at least
// word-sized.
GetStackLocation(HLoadLocal * load) const583 Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
584 switch (load->GetType()) {
585 case Primitive::kPrimLong:
586 case Primitive::kPrimDouble:
587 return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
588
589 case Primitive::kPrimInt:
590 case Primitive::kPrimNot:
591 case Primitive::kPrimFloat:
592 return Location::StackSlot(GetStackSlot(load->GetLocal()));
593
594 case Primitive::kPrimBoolean:
595 case Primitive::kPrimByte:
596 case Primitive::kPrimChar:
597 case Primitive::kPrimShort:
598 case Primitive::kPrimVoid:
599 LOG(FATAL) << "Unexpected type " << load->GetType();
600 UNREACHABLE();
601 }
602
603 LOG(FATAL) << "Unreachable";
604 UNREACHABLE();
605 }
606
// Assigns the location (register or stack slot) for the next method argument
// of `type`, advancing the visitor's register/stack cursors. Wide values
// consume two units and must land in properly aligned register pairs.
GetNextLocation(Primitive::Type type)607 Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
608 switch (type) {
609 case Primitive::kPrimBoolean:
610 case Primitive::kPrimByte:
611 case Primitive::kPrimChar:
612 case Primitive::kPrimShort:
613 case Primitive::kPrimInt:
614 case Primitive::kPrimNot: {
615 uint32_t index = gp_index_++;
616 uint32_t stack_index = stack_index_++;
617 if (index < calling_convention.GetNumberOfRegisters()) {
618 return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
619 } else {
620 return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
621 }
622 }
623
624 case Primitive::kPrimLong: {
625 uint32_t index = gp_index_;
626 uint32_t stack_index = stack_index_;
627 gp_index_ += 2;
628 stack_index_ += 2;
629 if (index + 1 < calling_convention.GetNumberOfRegisters()) {
630 if (calling_convention.GetRegisterAt(index) == R1) {
631 // Skip R1, and use R2_R3 instead.
632 gp_index_++;
633 index++;
634 }
635 }
// Re-check after the possible skip: both halves must still fit in registers.
636 if (index + 1 < calling_convention.GetNumberOfRegisters()) {
637 DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
638 calling_convention.GetRegisterAt(index + 1));
639 return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
640 calling_convention.GetRegisterAt(index + 1));
641 } else {
642 return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
643 }
644 }
645
646 case Primitive::kPrimFloat: {
647 uint32_t stack_index = stack_index_++;
// When the float cursor is even it may point into a range consumed by a
// double; jump it past the doubles handed out so far.
648 if (float_index_ % 2 == 0) {
649 float_index_ = std::max(double_index_, float_index_);
650 }
651 if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
652 return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
653 } else {
654 return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
655 }
656 }
657
658 case Primitive::kPrimDouble: {
// Doubles need an even-aligned FP register pair; round up past any
// odd single already allocated.
659 double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
660 uint32_t stack_index = stack_index_;
661 stack_index_ += 2;
662 if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
663 uint32_t index = double_index_;
664 double_index_ += 2;
665 Location result = Location::FpuRegisterPairLocation(
666 calling_convention.GetFpuRegisterAt(index),
667 calling_convention.GetFpuRegisterAt(index + 1));
668 DCHECK(ExpectedPairLayout(result));
669 return result;
670 } else {
671 return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
672 }
673 }
674
675 case Primitive::kPrimVoid:
676 LOG(FATAL) << "Unexpected parameter type " << type;
677 break;
678 }
679 return Location();
680 }
681
GetReturnLocation(Primitive::Type type)682 Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) {
683 switch (type) {
684 case Primitive::kPrimBoolean:
685 case Primitive::kPrimByte:
686 case Primitive::kPrimChar:
687 case Primitive::kPrimShort:
688 case Primitive::kPrimInt:
689 case Primitive::kPrimNot: {
690 return Location::RegisterLocation(R0);
691 }
692
693 case Primitive::kPrimFloat: {
694 return Location::FpuRegisterLocation(S0);
695 }
696
697 case Primitive::kPrimLong: {
698 return Location::RegisterPairLocation(R0, R1);
699 }
700
701 case Primitive::kPrimDouble: {
702 return Location::FpuRegisterPairLocation(S0, S1);
703 }
704
705 case Primitive::kPrimVoid:
706 return Location();
707 }
708 UNREACHABLE();
709 }
710
// Emits a 32-bit move between any combination of core register, S register,
// and stack slot. A no-op when source and destination coincide; stack-to-
// stack moves go through the scratch register IP.
Move32(Location destination,Location source)711 void CodeGeneratorARM::Move32(Location destination, Location source) {
712 if (source.Equals(destination)) {
713 return;
714 }
715 if (destination.IsRegister()) {
716 if (source.IsRegister()) {
717 __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
718 } else if (source.IsFpuRegister()) {
719 __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
720 } else {
721 __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
722 }
723 } else if (destination.IsFpuRegister()) {
724 if (source.IsRegister()) {
725 __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
726 } else if (source.IsFpuRegister()) {
727 __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
728 } else {
729 __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
730 }
731 } else {
732 DCHECK(destination.IsStackSlot()) << destination;
733 if (source.IsRegister()) {
734 __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
735 } else if (source.IsFpuRegister()) {
736 __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
737 } else {
738 DCHECK(source.IsStackSlot()) << source;
// Stack-to-stack: bounce through IP, the reserved scratch register.
739 __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
740 __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
741 }
742 }
743 }
744
// Emits a 64-bit move between register pairs, FP register pairs, and double
// stack slots. Pair-to-pair and stack-to-stack moves use the parallel move
// resolver because the two halves could overlap.
Move64(Location destination,Location source)745 void CodeGeneratorARM::Move64(Location destination, Location source) {
746 if (source.Equals(destination)) {
747 return;
748 }
749 if (destination.IsRegisterPair()) {
750 if (source.IsRegisterPair()) {
751 EmitParallelMoves(
752 Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
753 Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
754 Primitive::kPrimInt,
755 Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
756 Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
757 Primitive::kPrimInt);
758 } else if (source.IsFpuRegister()) {
759 UNIMPLEMENTED(FATAL);
760 } else {
761 DCHECK(source.IsDoubleStackSlot());
// ldrd needs an even/odd consecutive pair (see ExpectedPairLayout).
762 DCHECK(ExpectedPairLayout(destination));
763 __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
764 SP, source.GetStackIndex());
765 }
766 } else if (destination.IsFpuRegisterPair()) {
767 if (source.IsDoubleStackSlot()) {
768 __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
769 SP,
770 source.GetStackIndex());
771 } else {
772 UNIMPLEMENTED(FATAL);
773 }
774 } else {
775 DCHECK(destination.IsDoubleStackSlot());
776 if (source.IsRegisterPair()) {
777 // No conflict possible, so just do the moves.
// R1_R2 is not an strd-compatible pair, so store its halves separately.
778 if (source.AsRegisterPairLow<Register>() == R1) {
779 DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
780 __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
781 __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
782 } else {
783 __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
784 SP, destination.GetStackIndex());
785 }
786 } else if (source.IsFpuRegisterPair()) {
787 __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
788 SP,
789 destination.GetStackIndex());
790 } else {
791 DCHECK(source.IsDoubleStackSlot());
// Move the two words through the parallel move resolver.
792 EmitParallelMoves(
793 Location::StackSlot(source.GetStackIndex()),
794 Location::StackSlot(destination.GetStackIndex()),
795 Primitive::kPrimInt,
796 Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
797 Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
798 Primitive::kPrimInt);
799 }
800 }
801 }
802
// Moves the value produced by `instruction` into `location` on behalf of
// `move_for`. Dispatches on how the value is materialized: constant, local
// variable slot, temporary, or the instruction's own output location.
Move(HInstruction * instruction,Location location,HInstruction * move_for)803 void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
804 LocationSummary* locations = instruction->GetLocations();
// Nothing to do if the value is already where the consumer wants it.
805 if (locations != nullptr && locations->Out().Equals(location)) {
806 return;
807 }
808
// Case 1: the value is a constant — materialize it as an immediate.
809 if (locations != nullptr && locations->Out().IsConstant()) {
810 HConstant* const_to_move = locations->Out().GetConstant();
811 if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
812 int32_t value = GetInt32ValueOf(const_to_move);
813 if (location.IsRegister()) {
814 __ LoadImmediate(location.AsRegister<Register>(), value);
815 } else {
816 DCHECK(location.IsStackSlot());
817 __ LoadImmediate(IP, value);
818 __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
819 }
820 } else {
821 DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
822 int64_t value = const_to_move->AsLongConstant()->GetValue();
823 if (location.IsRegisterPair()) {
824 __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
825 __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
826 } else {
827 DCHECK(location.IsDoubleStackSlot());
// Spill the long one 32-bit half at a time through the IP scratch register.
828 __ LoadImmediate(IP, Low32Bits(value));
829 __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
830 __ LoadImmediate(IP, High32Bits(value));
831 __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
832 }
833 }
// Case 2: the value lives in a dex local's stack slot.
834 } else if (instruction->IsLoadLocal()) {
835 uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
836 switch (instruction->GetType()) {
837 case Primitive::kPrimBoolean:
838 case Primitive::kPrimByte:
839 case Primitive::kPrimChar:
840 case Primitive::kPrimShort:
841 case Primitive::kPrimInt:
842 case Primitive::kPrimNot:
843 case Primitive::kPrimFloat:
844 Move32(location, Location::StackSlot(stack_slot));
845 break;
846
847 case Primitive::kPrimLong:
848 case Primitive::kPrimDouble:
849 Move64(location, Location::DoubleStackSlot(stack_slot));
850 break;
851
852 default:
853 LOG(FATAL) << "Unexpected type " << instruction->GetType();
854 }
// Case 3: the value lives in a compiler temporary.
855 } else if (instruction->IsTemporary()) {
856 Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
857 if (temp_location.IsStackSlot()) {
858 Move32(location, temp_location);
859 } else {
860 DCHECK(temp_location.IsDoubleStackSlot());
861 Move64(location, temp_location);
862 }
// Case 4: move straight from the producing instruction's output location.
863 } else {
864 DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
865 switch (instruction->GetType()) {
866 case Primitive::kPrimBoolean:
867 case Primitive::kPrimByte:
868 case Primitive::kPrimChar:
869 case Primitive::kPrimShort:
870 case Primitive::kPrimNot:
871 case Primitive::kPrimInt:
872 case Primitive::kPrimFloat:
873 Move32(location, locations->Out());
874 break;
875
876 case Primitive::kPrimLong:
877 case Primitive::kPrimDouble:
878 Move64(location, locations->Out());
879 break;
880
881 default:
882 LOG(FATAL) << "Unexpected type " << instruction->GetType();
883 }
884 }
885 }
886
// Calls a quick runtime entry point: loads the entry point address from the
// current Thread (register TR) at `entry_point_offset`, branches to it via LR,
// and records the PC so the runtime can walk the stack back to `instruction`
// at `dex_pc` (and find `slow_path`, if any).
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Only these instruction kinds (or instructions whose locations already
  // declare a call) may invoke the runtime from an otherwise-leaf method.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}
901
// A goto consumes and produces no values, so it needs no location summary.
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}
905
// Emits the control transfer for an unconditional goto. A loop back edge with
// a pending suspend check emits that check instead (the check falls through to
// the successor when no suspension is requested); otherwise the branch itself
// is elided when the successor is the next block in code generation order.
void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // Loop phis have no live spill slots at the suspend point.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  // Method-entry suspend check: emitted when the goto leaving the entry block
  // is directly preceded by a suspend check.
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}
927
// The exit block's instruction has no operands or result.
void LocationsBuilderARM::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}
931
// No code is generated for the exit block itself.
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  UNUSED(exit);
}
935
// Emits the compare-and-branch sequence for `instruction`'s condition input:
//  - a constant condition branches unconditionally to `always_true_target`
//    (value 1) or simply falls through (value 0);
//  - a materialized condition is compared against 0 and branches on NE;
//  - a non-materialized HCondition emits the compare directly from its inputs
//    and branches on the corresponding ARM condition code.
// Null `true_target`/`false_target`/`always_true_target` mean "fall through".
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ cmp(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
             ShifterOperand(0));
      __ b(true_target, NE);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        // Use an immediate operand when the constant can be encoded;
        // otherwise load it into the IP scratch register first.
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          __ cmp(left, operand);
        } else {
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  // Fall through to the false target only if it is not the next block.
  if (false_target != nullptr) {
    __ b(false_target);
  }
}
987
// An HIf only needs an input register when its condition is materialized
// (either not an HCondition, or one whose value is produced into a register).
void LocationsBuilderARM::VisitIf(HIf* if_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}
996
VisitIf(HIf * if_instr)997 void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
998 Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
999 Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1000 Label* always_true_target = true_target;
1001 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1002 if_instr->IfTrueSuccessor())) {
1003 always_true_target = nullptr;
1004 }
1005 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1006 if_instr->IfFalseSuccessor())) {
1007 false_target = nullptr;
1008 }
1009 GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1010 }
1011
// Deoptimization branches to a slow path, so mark the summary kCallOnSlowPath.
// An input register is only needed when the condition is materialized.
void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  DCHECK(cond->IsCondition());
  if (cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}
1021
VisitDeoptimize(HDeoptimize * deoptimize)1022 void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1023 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
1024 DeoptimizationSlowPathARM(deoptimize);
1025 codegen_->AddSlowPath(slow_path);
1026 Label* slow_path_entry = slow_path->GetEntryLabel();
1027 GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1028 }
1029
// Shared location setup for all comparison conditions: left operand in a
// register, right operand register-or-constant; an output register only when
// the boolean result must be materialized.
void LocationsBuilderARM::VisitCondition(HCondition* comp) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(comp->InputAt(1)));
  if (comp->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}
1039
// Materializes a comparison into 0/1: emits the compare, then an IT block
// whose then/else moves write 1 on the condition and 0 on its opposite.
// Non-materialized conditions are consumed directly by their user (see
// GenerateTestAndBranch) and generate nothing here.
void InstructionCodeGeneratorARM::VisitCondition(HCondition* comp) {
  if (!comp->NeedsMaterialization()) return;
  LocationSummary* locations = comp->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    // Encode the constant as an immediate if possible; otherwise go via IP.
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      __ cmp(left, operand);
    } else {
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // IT <cond>, else: predicates the two following moves (Thumb-2).
  __ it(ARMCondition(comp->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(comp->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(comp->GetCondition()));
}
1065
// Locations are identical for all comparisons; delegate to VisitCondition.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}
1069
// Code generation is shared by all comparisons; delegate to VisitCondition.
void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}
1073
// Locations are identical for all comparisons; delegate to VisitCondition.
void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}
1077
// Code generation is shared by all comparisons; delegate to VisitCondition.
void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}
1081
// Locations are identical for all comparisons; delegate to VisitCondition.
void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}
1085
// Code generation is shared by all comparisons; delegate to VisitCondition.
void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}
1089
// Locations are identical for all comparisons; delegate to VisitCondition.
void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}
1093
// Code generation is shared by all comparisons; delegate to VisitCondition.
void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}
1097
// Locations are identical for all comparisons; delegate to VisitCondition.
void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}
1101
// Code generation is shared by all comparisons; delegate to VisitCondition.
void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}
1105
// Locations are identical for all comparisons; delegate to VisitCondition.
void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}
1109
// Code generation is shared by all comparisons; delegate to VisitCondition.
void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}
1113
// Local declarations carry no values, so they need no location summary.
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}
1117
// No code; only checks the invariant that locals live in the entry block.
void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}
1121
// Loads of locals are handled at their use sites (see CodeGeneratorARM::Move),
// so no locations are needed here.
void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}
1125
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}
1130
// Pins the stored value (input 1) directly to the local's stack slot: a
// 32-bit slot for word-sized types, a double slot for long/double. The store
// then needs no code of its own (see VisitStoreLocal below).
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
  switch (store->InputAt(1)->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
  }
}
1154
// No code: the input was constrained to the local's stack slot, so the value
// is already where it belongs.
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}
1158
// Constants are not materialized eagerly; the output is the constant itself.
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1164
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1169
// Constants are not materialized eagerly; the output is the constant itself.
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1175
void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1180
// Constants are not materialized eagerly; the output is the constant itself.
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1186
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1191
// Constants are not materialized eagerly; the output is the constant itself.
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1197
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1202
// Constants are not materialized eagerly; the output is the constant itself.
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1208
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1213
// A memory barrier has no operands or result.
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}
1217
// Emits the fence matching the requested barrier kind.
void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
1221
// A void return has no operands or result.
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}
1225
// Tears down the frame and returns to the caller.
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1230
// Pins the returned value to the calling convention's return location
// (e.g. core register pair or FP register, depending on type).
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
1236
// The value is already in the return location; just tear down the frame.
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1241
// Location setup for static/direct calls. Tries the intrinsics builder first;
// only falls back to the generic invoke handling when the call is not an
// intrinsic.
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}
1255
// Loads the current ArtMethod* into `reg` from its slot at the bottom of the
// frame (SP + kCurrentMethodStackOffset, i.e. SP + 0).
void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
  DCHECK(RequiresCurrentMethod());
  __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}
1260
TryGenerateIntrinsicCode(HInvoke * invoke,CodeGeneratorARM * codegen)1261 static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1262 if (invoke->GetLocations()->Intrinsified()) {
1263 IntrinsicCodeGeneratorARM intrinsic(codegen);
1264 intrinsic.Dispatch(invoke);
1265 return true;
1266 }
1267 return false;
1268 }
1269
// Code generation for static/direct calls: intrinsics are expanded inline;
// everything else goes through the generic static-or-direct call sequence
// using the temp register reserved by the locations builder.
void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();

  codegen_->GenerateStaticOrDirectCall(invoke, temp);
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1284
// Shared location setup for all invoke kinds: a temp fixed to R0 (consumed by
// the call-emission code), arguments placed per the managed (dex) calling
// convention, and the result in the convention's return location.
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
  locations->AddTemp(Location::RegisterLocation(R0));

  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
  }

  locations->SetOut(calling_convention_visitor.GetReturnLocation(invoke->GetType()));
}
1298
// Location setup for virtual calls; intrinsics take precedence over the
// generic vtable dispatch.
void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}
1308
// Virtual dispatch: load the receiver's class, index into the embedded
// vtable for the target ArtMethod*, then call through its quick entry point.
// The class load doubles as the implicit null check on the receiver.
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1339
// Interface calls use the generic invoke locations plus an extra temp (R12)
// for the hidden argument carrying the method index.
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}
1345
// Interface dispatch: set the hidden dex-method-index argument, load the
// receiver's class, index into the embedded IMT (modulo kImtSize), and call
// the resolved ArtMethod's quick entry point. The class load doubles as the
// implicit null check on the receiver.
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1378
// Location setup for arithmetic negation. Integer and FP negs are
// register-to-register; the long case requires a non-overlapping output
// because the generated code writes out.lo before it reads in.hi
// (see InstructionCodeGeneratorARM::VisitNeg).
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1404
// Emits arithmetic negation. Int uses a single RSB; long computes
// 0 - in as a 64-bit subtraction with the RSC-free sequence described below;
// float/double use the VFP vneg instructions.
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      // out = 0 - in
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set. We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      // D registers are addressed through the low S register of the pair.
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1451
VisitTypeConversion(HTypeConversion * conversion)1452 void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1453 Primitive::Type result_type = conversion->GetResultType();
1454 Primitive::Type input_type = conversion->GetInputType();
1455 DCHECK_NE(result_type, input_type);
1456
1457 // The float-to-long and double-to-long type conversions rely on a
1458 // call to the runtime.
1459 LocationSummary::CallKind call_kind =
1460 ((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1461 && result_type == Primitive::kPrimLong)
1462 ? LocationSummary::kCall
1463 : LocationSummary::kNoCall;
1464 LocationSummary* locations =
1465 new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1466
1467 // The Java language does not allow treating boolean as an integral type but
1468 // our bit representation makes it safe.
1469
1470 switch (result_type) {
1471 case Primitive::kPrimByte:
1472 switch (input_type) {
1473 case Primitive::kPrimBoolean:
1474 // Boolean input is a result of code transformations.
1475 case Primitive::kPrimShort:
1476 case Primitive::kPrimInt:
1477 case Primitive::kPrimChar:
1478 // Processing a Dex `int-to-byte' instruction.
1479 locations->SetInAt(0, Location::RequiresRegister());
1480 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1481 break;
1482
1483 default:
1484 LOG(FATAL) << "Unexpected type conversion from " << input_type
1485 << " to " << result_type;
1486 }
1487 break;
1488
1489 case Primitive::kPrimShort:
1490 switch (input_type) {
1491 case Primitive::kPrimBoolean:
1492 // Boolean input is a result of code transformations.
1493 case Primitive::kPrimByte:
1494 case Primitive::kPrimInt:
1495 case Primitive::kPrimChar:
1496 // Processing a Dex `int-to-short' instruction.
1497 locations->SetInAt(0, Location::RequiresRegister());
1498 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1499 break;
1500
1501 default:
1502 LOG(FATAL) << "Unexpected type conversion from " << input_type
1503 << " to " << result_type;
1504 }
1505 break;
1506
1507 case Primitive::kPrimInt:
1508 switch (input_type) {
1509 case Primitive::kPrimLong:
1510 // Processing a Dex `long-to-int' instruction.
1511 locations->SetInAt(0, Location::Any());
1512 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1513 break;
1514
1515 case Primitive::kPrimFloat:
1516 // Processing a Dex `float-to-int' instruction.
1517 locations->SetInAt(0, Location::RequiresFpuRegister());
1518 locations->SetOut(Location::RequiresRegister());
1519 locations->AddTemp(Location::RequiresFpuRegister());
1520 break;
1521
1522 case Primitive::kPrimDouble:
1523 // Processing a Dex `double-to-int' instruction.
1524 locations->SetInAt(0, Location::RequiresFpuRegister());
1525 locations->SetOut(Location::RequiresRegister());
1526 locations->AddTemp(Location::RequiresFpuRegister());
1527 break;
1528
1529 default:
1530 LOG(FATAL) << "Unexpected type conversion from " << input_type
1531 << " to " << result_type;
1532 }
1533 break;
1534
1535 case Primitive::kPrimLong:
1536 switch (input_type) {
1537 case Primitive::kPrimBoolean:
1538 // Boolean input is a result of code transformations.
1539 case Primitive::kPrimByte:
1540 case Primitive::kPrimShort:
1541 case Primitive::kPrimInt:
1542 case Primitive::kPrimChar:
1543 // Processing a Dex `int-to-long' instruction.
1544 locations->SetInAt(0, Location::RequiresRegister());
1545 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1546 break;
1547
1548 case Primitive::kPrimFloat: {
1549 // Processing a Dex `float-to-long' instruction.
1550 InvokeRuntimeCallingConvention calling_convention;
1551 locations->SetInAt(0, Location::FpuRegisterLocation(
1552 calling_convention.GetFpuRegisterAt(0)));
1553 locations->SetOut(Location::RegisterPairLocation(R0, R1));
1554 break;
1555 }
1556
1557 case Primitive::kPrimDouble: {
1558 // Processing a Dex `double-to-long' instruction.
1559 InvokeRuntimeCallingConvention calling_convention;
1560 locations->SetInAt(0, Location::FpuRegisterPairLocation(
1561 calling_convention.GetFpuRegisterAt(0),
1562 calling_convention.GetFpuRegisterAt(1)));
1563 locations->SetOut(Location::RegisterPairLocation(R0, R1));
1564 break;
1565 }
1566
1567 default:
1568 LOG(FATAL) << "Unexpected type conversion from " << input_type
1569 << " to " << result_type;
1570 }
1571 break;
1572
1573 case Primitive::kPrimChar:
1574 switch (input_type) {
1575 case Primitive::kPrimBoolean:
1576 // Boolean input is a result of code transformations.
1577 case Primitive::kPrimByte:
1578 case Primitive::kPrimShort:
1579 case Primitive::kPrimInt:
1580 // Processing a Dex `int-to-char' instruction.
1581 locations->SetInAt(0, Location::RequiresRegister());
1582 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1583 break;
1584
1585 default:
1586 LOG(FATAL) << "Unexpected type conversion from " << input_type
1587 << " to " << result_type;
1588 }
1589 break;
1590
1591 case Primitive::kPrimFloat:
1592 switch (input_type) {
1593 case Primitive::kPrimBoolean:
1594 // Boolean input is a result of code transformations.
1595 case Primitive::kPrimByte:
1596 case Primitive::kPrimShort:
1597 case Primitive::kPrimInt:
1598 case Primitive::kPrimChar:
1599 // Processing a Dex `int-to-float' instruction.
1600 locations->SetInAt(0, Location::RequiresRegister());
1601 locations->SetOut(Location::RequiresFpuRegister());
1602 break;
1603
1604 case Primitive::kPrimLong:
1605 // Processing a Dex `long-to-float' instruction.
1606 locations->SetInAt(0, Location::RequiresRegister());
1607 locations->SetOut(Location::RequiresFpuRegister());
1608 locations->AddTemp(Location::RequiresRegister());
1609 locations->AddTemp(Location::RequiresRegister());
1610 locations->AddTemp(Location::RequiresFpuRegister());
1611 locations->AddTemp(Location::RequiresFpuRegister());
1612 break;
1613
1614 case Primitive::kPrimDouble:
1615 // Processing a Dex `double-to-float' instruction.
1616 locations->SetInAt(0, Location::RequiresFpuRegister());
1617 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1618 break;
1619
1620 default:
1621 LOG(FATAL) << "Unexpected type conversion from " << input_type
1622 << " to " << result_type;
1623 };
1624 break;
1625
1626 case Primitive::kPrimDouble:
1627 switch (input_type) {
1628 case Primitive::kPrimBoolean:
1629 // Boolean input is a result of code transformations.
1630 case Primitive::kPrimByte:
1631 case Primitive::kPrimShort:
1632 case Primitive::kPrimInt:
1633 case Primitive::kPrimChar:
1634 // Processing a Dex `int-to-double' instruction.
1635 locations->SetInAt(0, Location::RequiresRegister());
1636 locations->SetOut(Location::RequiresFpuRegister());
1637 break;
1638
1639 case Primitive::kPrimLong:
1640 // Processing a Dex `long-to-double' instruction.
1641 locations->SetInAt(0, Location::RequiresRegister());
1642 locations->SetOut(Location::RequiresFpuRegister());
1643 locations->AddTemp(Location::RequiresRegister());
1644 locations->AddTemp(Location::RequiresRegister());
1645 locations->AddTemp(Location::RequiresFpuRegister());
1646 break;
1647
1648 case Primitive::kPrimFloat:
1649 // Processing a Dex `float-to-double' instruction.
1650 locations->SetInAt(0, Location::RequiresFpuRegister());
1651 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1652 break;
1653
1654 default:
1655 LOG(FATAL) << "Unexpected type conversion from " << input_type
1656 << " to " << result_type;
1657 };
1658 break;
1659
1660 default:
1661 LOG(FATAL) << "Unexpected type conversion from " << input_type
1662 << " to " << result_type;
1663 }
1664 }
1665
// Emits machine code for an HTypeConversion node.  Dispatch is on the
// (result_type, input_type) pair:
//  - narrowing integral conversions use SBFX/UBFX bitfield extracts;
//  - int <-> FP conversions use the VFP VCVT family, staging values through
//    S registers because VCVT only operates on FP registers;
//  - float-to-long and double-to-long are delegated to the pF2l/pD2l
//    runtime entrypoints;
//  - long-to-float/double is computed in double arithmetic from the two
//    32-bit halves (see the inline comments below).
// The temps consumed here were reserved by the matching
// LocationsBuilderARM::VisitTypeConversion.
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // Sign-extend bits [7:0] of the input into the output register.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // Sign-extend bits [15:0] of the input into the output register.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // Only the low 32 bits survive; the source may live in a register
          // pair, in a double stack slot, or be a long constant.
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // VCVT produces its integer result in an S register, so convert
          // into the FP temp first, then transfer to the core output register.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          // Same staging as float-to-int, but the source is the D register
          // overlapping the low S register of the input pair.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          // The high word is the low word's sign bit replicated (asr #31).
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // No inline sequence; delegate to the pF2l runtime entrypoint.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          // No inline sequence; delegate to the pD2l runtime entrypoint.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // Zero-extend bits [15:0]: char is an unsigned 16-bit type.
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          // Move the int into the output S register, then convert in place.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-float' instruction.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister output = out.AsFpuRegister<SRegister>();
          Register constant_low = locations->GetTemp(0).AsRegister<Register>();
          Register constant_high = locations->GetTemp(1).AsRegister<Register>();
          SRegister temp1_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
          DRegister temp1_d = FromLowSToD(temp1_s);
          SRegister temp2_s = locations->GetTemp(3).AsFpuRegisterPairLow<SRegister>();
          DRegister temp2_d = FromLowSToD(temp2_s);

          // Operations use doubles for precision reasons (each 32-bit
          // half of a long fits in the 53-bit mantissa of a double,
          // but not in the 24-bit mantissa of a float). This is
          // especially important for the low bits. The result is
          // eventually converted to float.

          // temp1_d = int-to-double(high)
          __ vmovsr(temp1_s, high);
          __ vcvtdi(temp1_d, temp1_s);
          // Using vmovd to load the `k2Pow32EncodingForDouble` constant
          // as an immediate value into `temp2_d` does not work, as
          // this instruction only transfers 8 significant bits of its
          // immediate operand. Instead, use two 32-bit core
          // registers to load `k2Pow32EncodingForDouble` into
          // `temp2_d`.
          __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
          __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
          __ vmovdrr(temp2_d, constant_low, constant_high);
          // temp1_d = temp1_d * 2^32
          __ vmuld(temp1_d, temp1_d, temp2_d);
          // temp2_d = unsigned-to-double(low)
          __ vmovsr(temp2_s, low);
          __ vcvtdu(temp2_d, temp2_s);
          // temp1_d = temp1_d + temp2_d
          __ vaddd(temp1_d, temp1_d, temp2_d);
          // output = double-to-float(temp1_d);
          __ vcvtsd(output, temp1_d);
          break;
        }

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          // Move the int into the low S register of the output pair, then
          // convert into the overlapping D register.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // Same high*2^32 + unsigned(low) scheme as long-to-float above,
          // except the result stays in double precision.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          Register constant_low = locations->GetTemp(0).AsRegister<Register>();
          Register constant_high = locations->GetTemp(1).AsRegister<Register>();
          SRegister temp_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);

          // out_d = int-to-double(high)
          __ vmovsr(out_s, high);
          __ vcvtdi(out_d, out_s);
          // Using vmovd to load the `k2Pow32EncodingForDouble` constant
          // as an immediate value into `temp_d` does not work, as
          // this instruction only transfers 8 significant bits of its
          // immediate operand. Instead, use two 32-bit core
          // registers to load `k2Pow32EncodingForDouble` into `temp_d`.
          __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
          __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
          __ vmovdrr(temp_d, constant_low, constant_high);
          // out_d = out_d * 2^32
          __ vmuld(out_d, out_d, temp_d);
          // temp_d = unsigned-to-double(low)
          __ vmovsr(temp_s, low);
          __ vcvtdu(temp_d, temp_s);
          // out_d = out_d + temp_d
          __ vaddd(out_d, out_d, temp_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
1939
VisitAdd(HAdd * add)1940 void LocationsBuilderARM::VisitAdd(HAdd* add) {
1941 LocationSummary* locations =
1942 new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
1943 switch (add->GetResultType()) {
1944 case Primitive::kPrimInt: {
1945 locations->SetInAt(0, Location::RequiresRegister());
1946 locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
1947 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1948 break;
1949 }
1950
1951 case Primitive::kPrimLong: {
1952 locations->SetInAt(0, Location::RequiresRegister());
1953 locations->SetInAt(1, Location::RequiresRegister());
1954 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1955 break;
1956 }
1957
1958 case Primitive::kPrimFloat:
1959 case Primitive::kPrimDouble: {
1960 locations->SetInAt(0, Location::RequiresFpuRegister());
1961 locations->SetInAt(1, Location::RequiresFpuRegister());
1962 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1963 break;
1964 }
1965
1966 default:
1967 LOG(FATAL) << "Unexpected add type " << add->GetResultType();
1968 }
1969 }
1970
VisitAdd(HAdd * add)1971 void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
1972 LocationSummary* locations = add->GetLocations();
1973 Location out = locations->Out();
1974 Location first = locations->InAt(0);
1975 Location second = locations->InAt(1);
1976 switch (add->GetResultType()) {
1977 case Primitive::kPrimInt:
1978 if (second.IsRegister()) {
1979 __ add(out.AsRegister<Register>(),
1980 first.AsRegister<Register>(),
1981 ShifterOperand(second.AsRegister<Register>()));
1982 } else {
1983 __ AddConstant(out.AsRegister<Register>(),
1984 first.AsRegister<Register>(),
1985 second.GetConstant()->AsIntConstant()->GetValue());
1986 }
1987 break;
1988
1989 case Primitive::kPrimLong: {
1990 DCHECK(second.IsRegisterPair());
1991 __ adds(out.AsRegisterPairLow<Register>(),
1992 first.AsRegisterPairLow<Register>(),
1993 ShifterOperand(second.AsRegisterPairLow<Register>()));
1994 __ adc(out.AsRegisterPairHigh<Register>(),
1995 first.AsRegisterPairHigh<Register>(),
1996 ShifterOperand(second.AsRegisterPairHigh<Register>()));
1997 break;
1998 }
1999
2000 case Primitive::kPrimFloat:
2001 __ vadds(out.AsFpuRegister<SRegister>(),
2002 first.AsFpuRegister<SRegister>(),
2003 second.AsFpuRegister<SRegister>());
2004 break;
2005
2006 case Primitive::kPrimDouble:
2007 __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2008 FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2009 FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2010 break;
2011
2012 default:
2013 LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2014 }
2015 }
2016
VisitSub(HSub * sub)2017 void LocationsBuilderARM::VisitSub(HSub* sub) {
2018 LocationSummary* locations =
2019 new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2020 switch (sub->GetResultType()) {
2021 case Primitive::kPrimInt: {
2022 locations->SetInAt(0, Location::RequiresRegister());
2023 locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2024 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2025 break;
2026 }
2027
2028 case Primitive::kPrimLong: {
2029 locations->SetInAt(0, Location::RequiresRegister());
2030 locations->SetInAt(1, Location::RequiresRegister());
2031 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2032 break;
2033 }
2034 case Primitive::kPrimFloat:
2035 case Primitive::kPrimDouble: {
2036 locations->SetInAt(0, Location::RequiresFpuRegister());
2037 locations->SetInAt(1, Location::RequiresFpuRegister());
2038 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2039 break;
2040 }
2041 default:
2042 LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2043 }
2044 }
2045
VisitSub(HSub * sub)2046 void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
2047 LocationSummary* locations = sub->GetLocations();
2048 Location out = locations->Out();
2049 Location first = locations->InAt(0);
2050 Location second = locations->InAt(1);
2051 switch (sub->GetResultType()) {
2052 case Primitive::kPrimInt: {
2053 if (second.IsRegister()) {
2054 __ sub(out.AsRegister<Register>(),
2055 first.AsRegister<Register>(),
2056 ShifterOperand(second.AsRegister<Register>()));
2057 } else {
2058 __ AddConstant(out.AsRegister<Register>(),
2059 first.AsRegister<Register>(),
2060 -second.GetConstant()->AsIntConstant()->GetValue());
2061 }
2062 break;
2063 }
2064
2065 case Primitive::kPrimLong: {
2066 DCHECK(second.IsRegisterPair());
2067 __ subs(out.AsRegisterPairLow<Register>(),
2068 first.AsRegisterPairLow<Register>(),
2069 ShifterOperand(second.AsRegisterPairLow<Register>()));
2070 __ sbc(out.AsRegisterPairHigh<Register>(),
2071 first.AsRegisterPairHigh<Register>(),
2072 ShifterOperand(second.AsRegisterPairHigh<Register>()));
2073 break;
2074 }
2075
2076 case Primitive::kPrimFloat: {
2077 __ vsubs(out.AsFpuRegister<SRegister>(),
2078 first.AsFpuRegister<SRegister>(),
2079 second.AsFpuRegister<SRegister>());
2080 break;
2081 }
2082
2083 case Primitive::kPrimDouble: {
2084 __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2085 FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2086 FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2087 break;
2088 }
2089
2090
2091 default:
2092 LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2093 }
2094 }
2095
VisitMul(HMul * mul)2096 void LocationsBuilderARM::VisitMul(HMul* mul) {
2097 LocationSummary* locations =
2098 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2099 switch (mul->GetResultType()) {
2100 case Primitive::kPrimInt:
2101 case Primitive::kPrimLong: {
2102 locations->SetInAt(0, Location::RequiresRegister());
2103 locations->SetInAt(1, Location::RequiresRegister());
2104 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2105 break;
2106 }
2107
2108 case Primitive::kPrimFloat:
2109 case Primitive::kPrimDouble: {
2110 locations->SetInAt(0, Location::RequiresFpuRegister());
2111 locations->SetInAt(1, Location::RequiresFpuRegister());
2112 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2113 break;
2114 }
2115
2116 default:
2117 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2118 }
2119 }
2120
VisitMul(HMul * mul)2121 void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
2122 LocationSummary* locations = mul->GetLocations();
2123 Location out = locations->Out();
2124 Location first = locations->InAt(0);
2125 Location second = locations->InAt(1);
2126 switch (mul->GetResultType()) {
2127 case Primitive::kPrimInt: {
2128 __ mul(out.AsRegister<Register>(),
2129 first.AsRegister<Register>(),
2130 second.AsRegister<Register>());
2131 break;
2132 }
2133 case Primitive::kPrimLong: {
2134 Register out_hi = out.AsRegisterPairHigh<Register>();
2135 Register out_lo = out.AsRegisterPairLow<Register>();
2136 Register in1_hi = first.AsRegisterPairHigh<Register>();
2137 Register in1_lo = first.AsRegisterPairLow<Register>();
2138 Register in2_hi = second.AsRegisterPairHigh<Register>();
2139 Register in2_lo = second.AsRegisterPairLow<Register>();
2140
2141 // Extra checks to protect caused by the existence of R1_R2.
2142 // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
2143 // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
2144 DCHECK_NE(out_hi, in1_lo);
2145 DCHECK_NE(out_hi, in2_lo);
2146
2147 // input: in1 - 64 bits, in2 - 64 bits
2148 // output: out
2149 // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
2150 // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
2151 // parts: out.lo = (in1.lo * in2.lo)[31:0]
2152
2153 // IP <- in1.lo * in2.hi
2154 __ mul(IP, in1_lo, in2_hi);
2155 // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
2156 __ mla(out_hi, in1_hi, in2_lo, IP);
2157 // out.lo <- (in1.lo * in2.lo)[31:0];
2158 __ umull(out_lo, IP, in1_lo, in2_lo);
2159 // out.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
2160 __ add(out_hi, out_hi, ShifterOperand(IP));
2161 break;
2162 }
2163
2164 case Primitive::kPrimFloat: {
2165 __ vmuls(out.AsFpuRegister<SRegister>(),
2166 first.AsFpuRegister<SRegister>(),
2167 second.AsFpuRegister<SRegister>());
2168 break;
2169 }
2170
2171 case Primitive::kPrimDouble: {
2172 __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2173 FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2174 FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2175 break;
2176 }
2177
2178 default:
2179 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2180 }
2181 }
2182
VisitDiv(HDiv * div)2183 void LocationsBuilderARM::VisitDiv(HDiv* div) {
2184 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2185 if (div->GetResultType() == Primitive::kPrimLong) {
2186 // pLdiv runtime call.
2187 call_kind = LocationSummary::kCall;
2188 } else if (div->GetResultType() == Primitive::kPrimInt &&
2189 !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2190 // pIdivmod runtime call.
2191 call_kind = LocationSummary::kCall;
2192 }
2193
2194 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
2195
2196 switch (div->GetResultType()) {
2197 case Primitive::kPrimInt: {
2198 if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2199 locations->SetInAt(0, Location::RequiresRegister());
2200 locations->SetInAt(1, Location::RequiresRegister());
2201 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2202 } else {
2203 InvokeRuntimeCallingConvention calling_convention;
2204 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2205 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2206 // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2207 // we only need the former.
2208 locations->SetOut(Location::RegisterLocation(R0));
2209 }
2210 break;
2211 }
2212 case Primitive::kPrimLong: {
2213 InvokeRuntimeCallingConvention calling_convention;
2214 locations->SetInAt(0, Location::RegisterPairLocation(
2215 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2216 locations->SetInAt(1, Location::RegisterPairLocation(
2217 calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2218 locations->SetOut(Location::RegisterPairLocation(R0, R1));
2219 break;
2220 }
2221 case Primitive::kPrimFloat:
2222 case Primitive::kPrimDouble: {
2223 locations->SetInAt(0, Location::RequiresFpuRegister());
2224 locations->SetInAt(1, Location::RequiresFpuRegister());
2225 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2226 break;
2227 }
2228
2229 default:
2230 LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2231 }
2232 }
2233
VisitDiv(HDiv * div)2234 void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
2235 LocationSummary* locations = div->GetLocations();
2236 Location out = locations->Out();
2237 Location first = locations->InAt(0);
2238 Location second = locations->InAt(1);
2239
2240 switch (div->GetResultType()) {
2241 case Primitive::kPrimInt: {
2242 if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2243 __ sdiv(out.AsRegister<Register>(),
2244 first.AsRegister<Register>(),
2245 second.AsRegister<Register>());
2246 } else {
2247 InvokeRuntimeCallingConvention calling_convention;
2248 DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2249 DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2250 DCHECK_EQ(R0, out.AsRegister<Register>());
2251
2252 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
2253 }
2254 break;
2255 }
2256
2257 case Primitive::kPrimLong: {
2258 InvokeRuntimeCallingConvention calling_convention;
2259 DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
2260 DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
2261 DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
2262 DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
2263 DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
2264 DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());
2265
2266 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
2267 break;
2268 }
2269
2270 case Primitive::kPrimFloat: {
2271 __ vdivs(out.AsFpuRegister<SRegister>(),
2272 first.AsFpuRegister<SRegister>(),
2273 second.AsFpuRegister<SRegister>());
2274 break;
2275 }
2276
2277 case Primitive::kPrimDouble: {
2278 __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2279 FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2280 FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2281 break;
2282 }
2283
2284 default:
2285 LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2286 }
2287 }
2288
VisitRem(HRem * rem)2289 void LocationsBuilderARM::VisitRem(HRem* rem) {
2290 Primitive::Type type = rem->GetResultType();
2291
2292 // Most remainders are implemented in the runtime.
2293 LocationSummary::CallKind call_kind = LocationSummary::kCall;
2294 if (rem->GetResultType() == Primitive::kPrimInt &&
2295 codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2296 // Have hardware divide instruction for int, do it with three instructions.
2297 call_kind = LocationSummary::kNoCall;
2298 }
2299
2300 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2301
2302 switch (type) {
2303 case Primitive::kPrimInt: {
2304 if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2305 locations->SetInAt(0, Location::RequiresRegister());
2306 locations->SetInAt(1, Location::RequiresRegister());
2307 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2308 locations->AddTemp(Location::RequiresRegister());
2309 } else {
2310 InvokeRuntimeCallingConvention calling_convention;
2311 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2312 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2313 // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2314 // we only need the latter.
2315 locations->SetOut(Location::RegisterLocation(R1));
2316 }
2317 break;
2318 }
2319 case Primitive::kPrimLong: {
2320 InvokeRuntimeCallingConvention calling_convention;
2321 locations->SetInAt(0, Location::RegisterPairLocation(
2322 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2323 locations->SetInAt(1, Location::RegisterPairLocation(
2324 calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2325 // The runtime helper puts the output in R2,R3.
2326 locations->SetOut(Location::RegisterPairLocation(R2, R3));
2327 break;
2328 }
2329 case Primitive::kPrimFloat: {
2330 InvokeRuntimeCallingConvention calling_convention;
2331 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2332 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2333 locations->SetOut(Location::FpuRegisterLocation(S0));
2334 break;
2335 }
2336
2337 case Primitive::kPrimDouble: {
2338 InvokeRuntimeCallingConvention calling_convention;
2339 locations->SetInAt(0, Location::FpuRegisterPairLocation(
2340 calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2341 locations->SetInAt(1, Location::FpuRegisterPairLocation(
2342 calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2343 locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2344 break;
2345 }
2346
2347 default:
2348 LOG(FATAL) << "Unexpected rem type " << type;
2349 }
2350 }
2351
VisitRem(HRem * rem)2352 void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2353 LocationSummary* locations = rem->GetLocations();
2354 Location out = locations->Out();
2355 Location first = locations->InAt(0);
2356 Location second = locations->InAt(1);
2357
2358 Primitive::Type type = rem->GetResultType();
2359 switch (type) {
2360 case Primitive::kPrimInt: {
2361 if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2362 Register reg1 = first.AsRegister<Register>();
2363 Register reg2 = second.AsRegister<Register>();
2364 Register temp = locations->GetTemp(0).AsRegister<Register>();
2365
2366 // temp = reg1 / reg2 (integer division)
2367 // temp = temp * reg2
2368 // dest = reg1 - temp
2369 __ sdiv(temp, reg1, reg2);
2370 __ mul(temp, temp, reg2);
2371 __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
2372 } else {
2373 InvokeRuntimeCallingConvention calling_convention;
2374 DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2375 DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2376 DCHECK_EQ(R1, out.AsRegister<Register>());
2377
2378 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2379 }
2380 break;
2381 }
2382
2383 case Primitive::kPrimLong: {
2384 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2385 break;
2386 }
2387
2388 case Primitive::kPrimFloat: {
2389 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2390 break;
2391 }
2392
2393 case Primitive::kPrimDouble: {
2394 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2395 break;
2396 }
2397
2398 default:
2399 LOG(FATAL) << "Unexpected rem type " << type;
2400 }
2401 }
2402
VisitDivZeroCheck(HDivZeroCheck * instruction)2403 void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2404 LocationSummary* locations =
2405 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2406 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2407 if (instruction->HasUses()) {
2408 locations->SetOut(Location::SameAsFirstInput());
2409 }
2410 }
2411
VisitDivZeroCheck(HDivZeroCheck * instruction)2412 void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2413 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2414 codegen_->AddSlowPath(slow_path);
2415
2416 LocationSummary* locations = instruction->GetLocations();
2417 Location value = locations->InAt(0);
2418
2419 switch (instruction->GetType()) {
2420 case Primitive::kPrimInt: {
2421 if (value.IsRegister()) {
2422 __ cmp(value.AsRegister<Register>(), ShifterOperand(0));
2423 __ b(slow_path->GetEntryLabel(), EQ);
2424 } else {
2425 DCHECK(value.IsConstant()) << value;
2426 if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2427 __ b(slow_path->GetEntryLabel());
2428 }
2429 }
2430 break;
2431 }
2432 case Primitive::kPrimLong: {
2433 if (value.IsRegisterPair()) {
2434 __ orrs(IP,
2435 value.AsRegisterPairLow<Register>(),
2436 ShifterOperand(value.AsRegisterPairHigh<Register>()));
2437 __ b(slow_path->GetEntryLabel(), EQ);
2438 } else {
2439 DCHECK(value.IsConstant()) << value;
2440 if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2441 __ b(slow_path->GetEntryLabel());
2442 }
2443 }
2444 break;
2445 default:
2446 LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2447 }
2448 }
2449 }
2450
HandleShift(HBinaryOperation * op)2451 void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2452 DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2453
2454 LocationSummary* locations =
2455 new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2456
2457 switch (op->GetResultType()) {
2458 case Primitive::kPrimInt: {
2459 locations->SetInAt(0, Location::RequiresRegister());
2460 locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2461 // Make the output overlap, as it will be used to hold the masked
2462 // second input.
2463 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2464 break;
2465 }
2466 case Primitive::kPrimLong: {
2467 locations->SetInAt(0, Location::RequiresRegister());
2468 locations->SetInAt(1, Location::RequiresRegister());
2469 locations->AddTemp(Location::RequiresRegister());
2470 locations->SetOut(Location::RequiresRegister());
2471 break;
2472 }
2473 default:
2474 LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
2475 }
2476 }
2477
// Generates code for an int or long shift (shl/shr/ushr). ARM shifts do not
// implicitly mask the shift amount the way the Java semantics require, so it
// is masked explicitly with kMaxIntShiftValue / kMaxLongShiftValue first.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // The overlapping output register holds the masked shift amount.
        __ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, out_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, out_reg);
        } else {
          __ Lsr(out_reg, first_reg, out_reg);
        }
      } else {
        // Constant shift amount: masked at compile time.
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      // 64-bit shifts combine both 32-bit halves. One half of the output is
      // reused as scratch for the masked shift amount, and a subs/it(PL)
      // predicated instruction patches the result when the amount is >= 32.
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      if (op->IsShl()) {
        __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the high part
        __ Lsl(o_h, high, o_l);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, o_l);
      } else if (op->IsShr()) {
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the low part
        __ Lsr(o_l, low, o_h);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, o_h);
      } else {
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ Lsr(o_l, low, o_h);
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, o_h);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
2574
// Shl locations are built by the shared shift helper.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2578
// Shl code generation is handled by the shared shift helper.
void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2582
// Shr locations are built by the shared shift helper.
void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2586
// Shr code generation is handled by the shared shift helper.
void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2590
// UShr locations are built by the shared shift helper.
void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2594
// UShr code generation is handled by the shared shift helper.
void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2598
// HNewInstance is a runtime call: argument registers of the runtime calling
// convention are reserved as temps, and the result is returned in R0.
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(R0));
}
2607
// Invokes the allocation entrypoint with the type index in the first argument
// register and the current ArtMethod* in the second.
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2617
// HNewArray is a runtime call: the array length (input 0) is pinned to the
// second argument register, the other argument registers are temps, and the
// result is returned in R0.
void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetOut(Location::RegisterLocation(R0));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
2627
// Invokes the array-allocation entrypoint: type index in the first argument
// register, length already in the second (see the locations builder), and the
// current ArtMethod* in the third.
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2637
// A parameter stays wherever the calling convention placed it. Stack-passed
// parameters live in the caller's frame, so this frame's size is added to
// rebase the slot index onto the callee's SP.
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}
2649
// No code is emitted for a parameter.
void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
  // Nothing to do, the parameter is already at its location.
  UNUSED(instruction);
}
2654
// Register constraints for bitwise not: register input, non-overlapping
// register output.
void LocationsBuilderARM::VisitNot(HNot* not_) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
2661
// Bitwise not: a single mvn for ints, and an mvn on each half of the
// register pair for longs.
void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}
2682
VisitBooleanNot(HBooleanNot * bool_not)2683 void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
2684 LocationSummary* locations =
2685 new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
2686 locations->SetInAt(0, Location::RequiresRegister());
2687 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2688 }
2689
// Boolean negation: XOR with 1 flips a 0/1 value.
void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations = bool_not->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
}
2696
// Builds locations for HCompare: longs use core register pairs with an
// overlapping output; floats/doubles use FPU inputs and a core output.
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  switch (compare->InputAt(0)->GetType()) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      // Output overlaps because it is written before doing the low comparison.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
  }
}
2719
// Generates code for HCompare: the output is set to -1, 0 or 1 for
// less-than, equal and greater-than respectively. Unordered FP comparisons
// branch to `greater` or `less` according to the compare's gt-bias.
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      // Decide on the high words first; fall through to the shared tail only
      // when they are equal, with flags set by the unsigned low-word compare.
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Shared tail: dispatch on the flags left by the last cmp/vmstat above.
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
2768
VisitPhi(HPhi * instruction)2769 void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
2770 LocationSummary* locations =
2771 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2772 for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2773 locations->SetInAt(i, Location::Any());
2774 }
2775 locations->SetOut(Location::Any());
2776 }
2777
// Phis never generate code; reaching this visitor is a compiler bug.
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
2782
GenerateMemoryBarrier(MemBarrierKind kind)2783 void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
2784 // TODO (ported from quick): revisit Arm barrier kinds
2785 DmbOptions flavour = DmbOptions::ISH; // quiet c++ warnings
2786 switch (kind) {
2787 case MemBarrierKind::kAnyStore:
2788 case MemBarrierKind::kLoadAny:
2789 case MemBarrierKind::kAnyAny: {
2790 flavour = DmbOptions::ISH;
2791 break;
2792 }
2793 case MemBarrierKind::kStoreStore: {
2794 flavour = DmbOptions::ISHST;
2795 break;
2796 }
2797 default:
2798 LOG(FATAL) << "Unexpected memory barrier " << kind;
2799 }
2800 __ dmb(flavour);
2801 }
2802
// Emits an atomic 64-bit load via ldrexd. When `offset` is non-zero the
// effective address is first computed into IP, using `out_lo` as scratch for
// the offset (it is overwritten by the load anyway).
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  if (offset != 0) {
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
2814
// Emits an atomic 64-bit store as an ldrexd/strexd retry loop. `temp1` and
// `temp2` receive the (discarded) loaded value; `temp1` then holds the
// strexd status, which is non-zero when the exclusive store failed and the
// loop must retry.
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  Label fail;
  if (offset != 0) {
    // Compute the effective address into IP, using temp1 as scratch.
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  __ cmp(temp1, ShifterOperand(0));
  __ b(&fail, NE);
}
2837
// Builds locations for an instance/static field store. Extra temps are added
// either for the GC write barrier (reference stores) or for the
// ldrexd/strexd sequence needed by wide volatile stores on cores without
// single-copy-atomic ldrd/strd.
void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}
2878
// Generates the store for an instance/static field set. Volatile stores are
// bracketed by memory barriers (any-store before, any-any after). Wide
// (64-bit) volatile stores on cores without atomic ldrd/strd go through an
// ldrexd/strexd loop; reference stores are followed by a GC card mark.
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Move the FPU value into two core temps so it can go through the
        // ldrexd/strexd path.
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>());
  }

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
2976
// Builds locations for an instance/static field load. The output is marked
// kOutputOverlap for volatile long loads; a volatile double on cores without
// atomic ldrd/strd additionally needs two core temps to receive the ldrexd
// result before it is moved to the FPU pair.
void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  bool volatile_for_double = field_info.IsVolatile()
      && (field_info.GetFieldType() == Primitive::kPrimDouble)
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);

  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(),
                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
  }
  if (volatile_for_double) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  }
}
3005
// Generates the load for an instance/static field get. Wide (64-bit)
// volatile loads on cores without atomic ldrd/strd use ldrexd; volatile
// loads are followed by a load-any memory barrier.
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Load atomically into two core temps, then move to the FPU pair.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}
3090
// Instance field stores share the field-set locations helper.
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3094
// Instance field stores share the field-set codegen helper.
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3098
// Instance field loads share the field-get locations helper.
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3102
// Instance field loads share the field-get codegen helper.
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3106
// Static field loads share the field-get locations helper.
void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3110
// Static field loads share the field-get codegen helper.
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3114
// Static field stores share the field-set locations helper.
void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3118
// Static field stores share the field-set codegen helper.
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3122
// A null check needs its input in a register. If the check has uses, the
// checked value is passed through in the same location.
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3131
// Implicit null check: unless the check can be folded into the user's own
// memory access, load through the object into IP (faulting on null) and
// record the PC so the runtime can map the fault back to this instruction.
void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
3141
// Explicit null check: compare the object against zero and branch to the
// null-check slow path when it is null.
void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
}
3152
VisitNullCheck(HNullCheck * instruction)3153 void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3154 if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
3155 GenerateImplicitNullCheck(instruction);
3156 } else {
3157 GenerateExplicitNullCheck(instruction);
3158 }
3159 }
3160
VisitArrayGet(HArrayGet * instruction)3161 void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3162 LocationSummary* locations =
3163 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3164 locations->SetInAt(0, Location::RequiresRegister());
3165 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3166 if (Primitive::IsFloatingPointType(instruction->GetType())) {
3167 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3168 } else {
3169 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3170 }
3171 }
3172
// Generates the load for an array element. A constant index is folded into
// the load's immediate offset; a register index is scaled by the element
// size and added to the array base in IP first. The implicit null check is
// recorded after the load.
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // References are loaded like plain words (compressed-pointer-free heap).
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
3299
VisitArraySet(HArraySet * instruction)3300 void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
3301 Primitive::Type value_type = instruction->GetComponentType();
3302
3303 bool needs_write_barrier =
3304 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3305 bool needs_runtime_call = instruction->NeedsTypeCheck();
3306
3307 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3308 instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
3309 if (needs_runtime_call) {
3310 InvokeRuntimeCallingConvention calling_convention;
3311 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3312 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3313 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3314 } else {
3315 locations->SetInAt(0, Location::RequiresRegister());
3316 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3317 if (Primitive::IsFloatingPointType(value_type)) {
3318 locations->SetInAt(2, Location::RequiresFpuRegister());
3319 } else {
3320 locations->SetInAt(2, Location::RequiresRegister());
3321 }
3322
3323 if (needs_write_barrier) {
3324 // Temporary registers for the write barrier.
3325 locations->AddTemp(Location::RequiresRegister());
3326 locations->AddTemp(Location::RequiresRegister());
3327 }
3328 }
3329 }
3330
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  // Stores a value into an array element. Object stores that require a type
  // check are delegated to the runtime; all other cases are emitted inline.
  // For register indices the element address is first formed in IP.
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        // Constant index: fold the scaled index into the immediate offset.
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        // Record the null check on the store itself (not after the write
        // barrier, whose instructions would move the recorded PC).
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          // Only reference stores need to dirty the GC card table.
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value);
        }
      } else {
        // Type-checked reference store: let the runtime do the whole store.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch (their null check has to be
  // recorded before the write barrier / runtime call).
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
3455
VisitArrayLength(HArrayLength * instruction)3456 void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
3457 LocationSummary* locations =
3458 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3459 locations->SetInAt(0, Location::RequiresRegister());
3460 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3461 }
3462
VisitArrayLength(HArrayLength * instruction)3463 void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
3464 LocationSummary* locations = instruction->GetLocations();
3465 uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
3466 Register obj = locations->InAt(0).AsRegister<Register>();
3467 Register out = locations->Out().AsRegister<Register>();
3468 __ LoadFromOffset(kLoadWord, out, obj, offset);
3469 codegen_->MaybeRecordImplicitNullCheck(instruction);
3470 }
3471
VisitBoundsCheck(HBoundsCheck * instruction)3472 void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3473 LocationSummary* locations =
3474 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3475 locations->SetInAt(0, Location::RequiresRegister());
3476 locations->SetInAt(1, Location::RequiresRegister());
3477 if (instruction->HasUses()) {
3478 locations->SetOut(Location::SameAsFirstInput());
3479 }
3480 }
3481
VisitBoundsCheck(HBoundsCheck * instruction)3482 void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3483 LocationSummary* locations = instruction->GetLocations();
3484 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
3485 instruction, locations->InAt(0), locations->InAt(1));
3486 codegen_->AddSlowPath(slow_path);
3487
3488 Register index = locations->InAt(0).AsRegister<Register>();
3489 Register length = locations->InAt(1).AsRegister<Register>();
3490
3491 __ cmp(index, ShifterOperand(length));
3492 __ b(slow_path->GetEntryLabel(), CS);
3493 }
3494
MarkGCCard(Register temp,Register card,Register object,Register value)3495 void CodeGeneratorARM::MarkGCCard(Register temp, Register card, Register object, Register value) {
3496 Label is_null;
3497 __ CompareAndBranchIfZero(value, &is_null);
3498 __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
3499 __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
3500 __ strb(card, Address(card, temp));
3501 __ Bind(&is_null);
3502 }
3503
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  // A temporary has no locations of its own; it is managed entirely by the
  // code generator.
  temp->SetLocations(nullptr);
}
3507
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  // No code is ever emitted for an HTemporary.
  UNUSED(temp);
}
3512
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  UNUSED(instruction);
  // Parallel moves are introduced after the locations-building phase, so
  // they must never be visited here.
  LOG(FATAL) << "Unreachable";
}
3517
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  // Delegate to the parallel move resolver, which sequences the individual
  // moves (resolving cycles with scratch registers).
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
3521
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  // No inputs or outputs; the only possible call is on the slow path.
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
3525
VisitSuspendCheck(HSuspendCheck * instruction)3526 void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
3527 HBasicBlock* block = instruction->GetBlock();
3528 if (block->GetLoopInformation() != nullptr) {
3529 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3530 // The back edge will generate the suspend check.
3531 return;
3532 }
3533 if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3534 // The goto will generate the suspend check.
3535 return;
3536 }
3537 GenerateSuspendCheck(instruction, nullptr);
3538 }
3539
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  // Emits the suspend check: test the current thread's flags and, when they
  // are non-zero, jump to a slow path. `successor` is non-null when the check
  // sits on a back edge; the fast path then branches straight to that block.
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    // First emission for this check: create and register its slow path.
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    // The slow path is cached on the instruction; it must agree on the
    // successor block.
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  // IP <- 16-bit thread-flags word of the current thread (TR).
  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  __ cmp(IP, ShifterOperand(0));
  // TODO: Figure out the branch offsets and use cbz/cbnz.
  if (successor == nullptr) {
    // Fall through on the fast path; the slow path returns right here.
    __ b(slow_path->GetEntryLabel(), NE);
    __ Bind(slow_path->GetReturnLabel());
  } else {
    // Back-edge form: fast path jumps to the successor, otherwise enter the
    // slow path unconditionally.
    __ b(codegen_->GetLabelOf(successor), EQ);
    __ b(slow_path->GetEntryLabel());
  }
}
3568
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  // The resolver emits code through the code generator's assembler.
  return codegen_->GetAssembler();
}
3572
void ParallelMoveResolverARM::EmitMove(size_t index) {
  // Emits one move of the parallel-move list. Handles every source/destination
  // pairing the register allocator can produce: core register, FP register,
  // register pair, FP register pair, (double) stack slot, and constant.
  // IP and DTMP serve as scratch for memory-to-memory transfers.
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      // Stack-to-stack word copy goes through the IP scratch register.
      DCHECK(destination.IsStackSlot());
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsDoubleStackSlot()) {
      // 64-bit stack-to-stack copy goes through the DTMP scratch register.
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      // A single ldrd suffices because register pairs are even/odd adjacent
      // (see ExpectedPairLayout).
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    // Remaining case: materialize a constant into a register or stack slot.
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        // Store the two halves separately, IP as scratch for each.
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        // Spill the raw 64-bit pattern of the double to the stack slot.
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}
3687
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  // Swap a core register with an SP-relative stack slot, using IP as scratch.
  __ Mov(IP, reg);
  __ LoadFromOffset(kLoadWord, reg, SP, mem);
  __ StoreToOffset(kStoreWord, IP, SP, mem);
}
3693
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  // Swap two SP-relative stack slots. A second core scratch register is
  // needed besides IP; if none is free the scope pushes one, which shifts
  // all SP-relative offsets by one word for the duration of the swap.
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}
3704
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  // Swaps the source and destination of one move, used to break cycles in the
  // parallel-move graph. IP and DTMP are the core/FP scratch registers.
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Three-mov swap through IP; neither operand may itself be IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // S-register swap routed through the core scratch register IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Pair swap: park the 64-bit source in DTMP while the destination moves.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Register pair <-> double stack slot; the ldrd relies on the pair being
    // even/odd adjacent (ExpectedPairLayout).
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // FP pair <-> double stack slot, via DTMP.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // S register <-> stack slot, via IP.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // Swap the two 64-bit slots one word at a time.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
3777
void ParallelMoveResolverARM::SpillScratch(int reg) {
  // Free up a scratch register by saving its current value on the stack.
  __ Push(static_cast<Register>(reg));
}
3781
void ParallelMoveResolverARM::RestoreScratch(int reg) {
  // Counterpart of SpillScratch: reload the saved value from the stack.
  __ Pop(static_cast<Register>(reg));
}
3785
VisitLoadClass(HLoadClass * cls)3786 void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
3787 LocationSummary::CallKind call_kind = cls->CanCallRuntime()
3788 ? LocationSummary::kCallOnSlowPath
3789 : LocationSummary::kNoCall;
3790 LocationSummary* locations =
3791 new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
3792 locations->SetOut(Location::RequiresRegister());
3793 }
3794
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  // Loads a java.lang.Class into `out`, either directly from the current
  // method (referrer's class) or through the dex cache with a slow-path
  // fallback for unresolved entries.
  Register out = cls->GetLocations()->Out().AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    // The class is the declaring class of the current method: one load away
    // from the ArtMethod, never needs the runtime.
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    codegen_->LoadCurrentMethod(out);
    __ LoadFromOffset(kLoadWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out <- current method -> resolved-types cache -> cache entry.
    codegen_->LoadCurrentMethod(out);
    __ LoadFromOffset(
        kLoadWord, out, out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));

    // A null cache entry means the class is not resolved yet: take the slow
    // path (which also runs the clinit check when required).
    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ cmp(out, ShifterOperand(0));
    __ b(slow_path->GetEntryLabel(), EQ);
    if (cls->MustGenerateClinitCheck()) {
      // The initialization check binds the slow path's exit label itself.
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
3821
VisitClinitCheck(HClinitCheck * check)3822 void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
3823 LocationSummary* locations =
3824 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
3825 locations->SetInAt(0, Location::RequiresRegister());
3826 if (check->HasUses()) {
3827 locations->SetOut(Location::SameAsFirstInput());
3828 }
3829 }
3830
VisitClinitCheck(HClinitCheck * check)3831 void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
3832 // We assume the class is not null.
3833 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
3834 check->GetLoadClass(), check, check->GetDexPc(), true);
3835 codegen_->AddSlowPath(slow_path);
3836 GenerateClassInitializationCheck(slow_path,
3837 check->GetLocations()->InAt(0).AsRegister<Register>());
3838 }
3839
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  // Branch to `slow_path` when the class status (loaded into IP) is below
  // kStatusInitialized; binds the slow path's exit label on the way out.
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
3850
VisitLoadString(HLoadString * load)3851 void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
3852 LocationSummary* locations =
3853 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
3854 locations->SetOut(Location::RequiresRegister());
3855 }
3856
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  // Loads a String from the dex cache of the current method, taking a slow
  // path when the cache entry is still null (string not resolved yet).
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  Register out = load->GetLocations()->Out().AsRegister<Register>();
  // out <- current method -> declaring class -> strings cache -> entry.
  codegen_->LoadCurrentMethod(out);
  __ LoadFromOffset(kLoadWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  __ cmp(out, ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
  __ Bind(slow_path->GetExitLabel());
}
3870
VisitLoadException(HLoadException * load)3871 void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
3872 LocationSummary* locations =
3873 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
3874 locations->SetOut(Location::RequiresRegister());
3875 }
3876
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
  // Load the pending exception from the current thread (TR)...
  __ LoadFromOffset(kLoadWord, out, TR, offset);
  // ...and clear the thread-local slot by storing null back.
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, offset);
}
3884
VisitThrow(HThrow * instruction)3885 void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
3886 LocationSummary* locations =
3887 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3888 InvokeRuntimeCallingConvention calling_convention;
3889 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3890 }
3891
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  // Exception delivery is handled entirely by the runtime entrypoint.
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
3896
VisitInstanceOf(HInstanceOf * instruction)3897 void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
3898 LocationSummary::CallKind call_kind = instruction->IsClassFinal()
3899 ? LocationSummary::kNoCall
3900 : LocationSummary::kCallOnSlowPath;
3901 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
3902 locations->SetInAt(0, Location::RequiresRegister());
3903 locations->SetInAt(1, Location::RequiresRegister());
3904 // The out register is used as a temporary, so it overlaps with the inputs.
3905 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
3906 }
3907
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  // Materializes `obj instanceof cls` as 0/1 in `out`. `out` doubles as the
  // temporary holding the class of `obj` (see the overlap in the locations).
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Label done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ cmp(obj, ShifterOperand(0));
    __ b(&zero, EQ);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // `zero` is only branched to from the null check and/or the final-class
  // mismatch above; bind it only when one of those branches was emitted.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}
3952
VisitCheckCast(HCheckCast * instruction)3953 void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
3954 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3955 instruction, LocationSummary::kCallOnSlowPath);
3956 locations->SetInAt(0, Location::RequiresRegister());
3957 locations->SetInAt(1, Location::RequiresRegister());
3958 locations->AddTemp(Location::RequiresRegister());
3959 }
3960
// Emits a checked cast: a null object passes trivially, an exact class match
// passes on the fast path, and anything else is resolved (or thrown) by
// TypeCheckSlowPathARM.
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // A null reference is castable to any type, so jump straight past the
  // check. The null check is skipped when `obj` is known to be non-null.
  if (instruction->MustDoNullCheck()) {
    __ cmp(obj, ShifterOperand(0));
    __ b(slow_path->GetExitLabel(), EQ);
  }
  // Compare the class of `obj` with `cls`; on mismatch the slow path does the
  // full subtype check and throws on failure.
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ cmp(temp, ShifterOperand(cls));
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
}
3983
VisitMonitorOperation(HMonitorOperation * instruction)3984 void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
3985 LocationSummary* locations =
3986 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3987 InvokeRuntimeCallingConvention calling_convention;
3988 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3989 }
3990
VisitMonitorOperation(HMonitorOperation * instruction)3991 void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
3992 codegen_->InvokeRuntime(instruction->IsEnter()
3993 ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
3994 instruction,
3995 instruction->GetDexPc(),
3996 nullptr);
3997 }
3998
VisitAnd(HAnd * instruction)3999 void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
VisitOr(HOr * instruction)4000 void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
VisitXor(HXor * instruction)4001 void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4002
HandleBitwiseOperation(HBinaryOperation * instruction)4003 void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4004 LocationSummary* locations =
4005 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4006 DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4007 || instruction->GetResultType() == Primitive::kPrimLong);
4008 locations->SetInAt(0, Location::RequiresRegister());
4009 locations->SetInAt(1, Location::RequiresRegister());
4010 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4011 }
4012
VisitAnd(HAnd * instruction)4013 void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
4014 HandleBitwiseOperation(instruction);
4015 }
4016
VisitOr(HOr * instruction)4017 void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
4018 HandleBitwiseOperation(instruction);
4019 }
4020
VisitXor(HXor * instruction)4021 void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
4022 HandleBitwiseOperation(instruction);
4023 }
4024
HandleBitwiseOperation(HBinaryOperation * instruction)4025 void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4026 LocationSummary* locations = instruction->GetLocations();
4027
4028 if (instruction->GetResultType() == Primitive::kPrimInt) {
4029 Register first = locations->InAt(0).AsRegister<Register>();
4030 Register second = locations->InAt(1).AsRegister<Register>();
4031 Register out = locations->Out().AsRegister<Register>();
4032 if (instruction->IsAnd()) {
4033 __ and_(out, first, ShifterOperand(second));
4034 } else if (instruction->IsOr()) {
4035 __ orr(out, first, ShifterOperand(second));
4036 } else {
4037 DCHECK(instruction->IsXor());
4038 __ eor(out, first, ShifterOperand(second));
4039 }
4040 } else {
4041 DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
4042 Location first = locations->InAt(0);
4043 Location second = locations->InAt(1);
4044 Location out = locations->Out();
4045 if (instruction->IsAnd()) {
4046 __ and_(out.AsRegisterPairLow<Register>(),
4047 first.AsRegisterPairLow<Register>(),
4048 ShifterOperand(second.AsRegisterPairLow<Register>()));
4049 __ and_(out.AsRegisterPairHigh<Register>(),
4050 first.AsRegisterPairHigh<Register>(),
4051 ShifterOperand(second.AsRegisterPairHigh<Register>()));
4052 } else if (instruction->IsOr()) {
4053 __ orr(out.AsRegisterPairLow<Register>(),
4054 first.AsRegisterPairLow<Register>(),
4055 ShifterOperand(second.AsRegisterPairLow<Register>()));
4056 __ orr(out.AsRegisterPairHigh<Register>(),
4057 first.AsRegisterPairHigh<Register>(),
4058 ShifterOperand(second.AsRegisterPairHigh<Register>()));
4059 } else {
4060 DCHECK(instruction->IsXor());
4061 __ eor(out.AsRegisterPairLow<Register>(),
4062 first.AsRegisterPairLow<Register>(),
4063 ShifterOperand(second.AsRegisterPairLow<Register>()));
4064 __ eor(out.AsRegisterPairHigh<Register>(),
4065 first.AsRegisterPairHigh<Register>(),
4066 ShifterOperand(second.AsRegisterPairHigh<Register>()));
4067 }
4068 }
4069 }
4070
// Emits the call sequence for a static or direct invoke. Three shapes:
//   * String.<init> factory: load the entrypoint off the current Thread (TR)
//     and call through it;
//   * recursive call: `bl` straight to this method's own frame entry;
//   * everything else: resolve the callee ArtMethod* through the caller's
//     dex cache, then call its quick-compiled entry point.
// `temp` is clobbered and must be the dedicated ArtMethod* register.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
  DCHECK_EQ(temp, kArtMethodRegister);

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  if (invoke->IsStringInit()) {
    // temp = thread->string_init_entrypoint
    __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
    // LR = temp[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, temp,
                      ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                          kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  } else {
    // temp = method;
    LoadCurrentMethod(temp);
    if (!invoke->IsRecursive()) {
      // temp = temp->dex_cache_resolved_methods_;
      __ LoadFromOffset(
          kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
      // temp = temp[index_in_cache]
      __ LoadFromOffset(
          kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
      // LR = temp[offset_of_quick_compiled_code]
      __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
          kArmWordSize).Int32Value());
      // LR()
      __ blx(LR);
    } else {
      // Self-call: branch directly to our own frame entry, skipping the
      // dex-cache lookup entirely.
      __ bl(GetFrameEntryLabel());
    }
  }

  // Emitting a call means this method can no longer be treated as a leaf.
  DCHECK(!IsLeafMethod());
}
4112
// HBoundType carries type information for the optimizer only; the "prepare
// for register allocator" pass removes it, so it must never reach this stage.
void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4118
// HBoundType carries type information for the optimizer only; the "prepare
// for register allocator" pass removes it, so it must never reach this stage.
void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4124
4125 } // namespace arm
4126 } // namespace art
4127