1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "code_generator_mips64.h"
18
19 #include "entrypoints/quick/quick_entrypoints.h"
20 #include "entrypoints/quick/quick_entrypoints_enum.h"
21 #include "gc/accounting/card_table.h"
22 #include "intrinsics.h"
23 #include "art_method.h"
24 #include "mirror/array-inl.h"
25 #include "mirror/class-inl.h"
26 #include "offsets.h"
27 #include "thread.h"
28 #include "utils/mips64/assembler_mips64.h"
29 #include "utils/assembler.h"
30 #include "utils/stack_checks.h"
31
32 namespace art {
33 namespace mips64 {
34
35 static constexpr int kCurrentMethodStackOffset = 0;
36 static constexpr GpuRegister kMethodRegisterArgument = A0;
37
38 // We need extra temporary/scratch registers (in addition to AT) in some cases.
39 static constexpr GpuRegister TMP = T8;
40 static constexpr FpuRegister FTMP = F8;
41
42 // ART Thread Register.
43 static constexpr GpuRegister TR = S1;
44
45 Location Mips64ReturnLocation(Primitive::Type return_type) {
46 switch (return_type) {
47 case Primitive::kPrimBoolean:
48 case Primitive::kPrimByte:
49 case Primitive::kPrimChar:
50 case Primitive::kPrimShort:
51 case Primitive::kPrimInt:
52 case Primitive::kPrimNot:
53 case Primitive::kPrimLong:
54 return Location::RegisterLocation(V0);
55
56 case Primitive::kPrimFloat:
57 case Primitive::kPrimDouble:
58 return Location::FpuRegisterLocation(F0);
59
60 case Primitive::kPrimVoid:
61 return Location();
62 }
63 UNREACHABLE();
64 }
65
66 Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
67 return Mips64ReturnLocation(type);
68 }
69
70 Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
71 Location next_location;
72 if (type == Primitive::kPrimVoid) {
73 LOG(FATAL) << "Unexpected parameter type " << type;
74 }
75
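  // Note: the GPR and FPR argument indices are advanced together below (allocating a
  // floating-point register also bumps gp_index_ and vice versa), since argument register
  // slots are shared between the two register files, as in the MIPS64 N64 calling convention.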
76 if (Primitive::IsFloatingPointType(type) &&
77 (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
78 next_location = Location::FpuRegisterLocation(
79 calling_convention.GetFpuRegisterAt(float_index_++));
80 gp_index_++;
81 } else if (!Primitive::IsFloatingPointType(type) &&
82 (gp_index_ < calling_convention.GetNumberOfRegisters())) {
83 next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
84 float_index_++;
85 } else {
86 size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
87 next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
88 : Location::StackSlot(stack_offset);
89 }
90
91 // Space on the stack is reserved for all arguments.
92 stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
93
94 // TODO: review
95
96 // TODO: shouldn't we use a whole machine word per argument on the stack?
97 // Implicit 4-byte method pointer (and such) will cause misalignment.
98
99 return next_location;
100 }
101
102 Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
103 return Mips64ReturnLocation(type);
104 }
105
106 #define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
107 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()
108
109 class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
110 public:
111 BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction,
112 Location index_location,
113 Location length_location)
114 : instruction_(instruction),
115 index_location_(index_location),
116 length_location_(length_location) {}
117
118 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
119 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
120 __ Bind(GetEntryLabel());
121 // We're moving two locations to locations that could overlap, so we need a parallel
122 // move resolver.
123 InvokeRuntimeCallingConvention calling_convention;
124 codegen->EmitParallelMoves(index_location_,
125 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
126 Primitive::kPrimInt,
127 length_location_,
128 Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
129 Primitive::kPrimInt);
130 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
131 instruction_,
132 instruction_->GetDexPc(),
133 this);
134 CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
135 }
136
137 private:
138 HBoundsCheck* const instruction_;
139 const Location index_location_;
140 const Location length_location_;
141
142 DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
143 };
144
145 class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
146 public:
147 explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}
148
149 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
150 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
151 __ Bind(GetEntryLabel());
152 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
153 instruction_,
154 instruction_->GetDexPc(),
155 this);
156 CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
157 }
158
159 private:
160 HDivZeroCheck* const instruction_;
161 DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
162 };
163
164 class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
165 public:
166 LoadClassSlowPathMIPS64(HLoadClass* cls,
167 HInstruction* at,
168 uint32_t dex_pc,
169 bool do_clinit)
170 : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
171 DCHECK(at->IsLoadClass() || at->IsClinitCheck());
172 }
173
174 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
175 LocationSummary* locations = at_->GetLocations();
176 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
177
178 __ Bind(GetEntryLabel());
179 SaveLiveRegisters(codegen, locations);
180
181 InvokeRuntimeCallingConvention calling_convention;
182 __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
183 int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
184 : QUICK_ENTRY_POINT(pInitializeType);
185 mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
186 if (do_clinit_) {
187 CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
188 } else {
189 CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
190 }
191
192 // Move the class to the desired location.
193 Location out = locations->Out();
194 if (out.IsValid()) {
195 DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
196 Primitive::Type type = at_->GetType();
197 mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
198 }
199
200 RestoreLiveRegisters(codegen, locations);
201 __ B(GetExitLabel());
202 }
203
204 private:
205 // The class this slow path will load.
206 HLoadClass* const cls_;
207
208 // The instruction where this slow path is happening.
209 // (Might be the load class or an initialization check).
210 HInstruction* const at_;
211
212 // The dex PC of `at_`.
213 const uint32_t dex_pc_;
214
215 // Whether to initialize the class.
216 const bool do_clinit_;
217
218 DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
219 };
220
221 class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
222 public:
223 explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}
224
225 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
226 LocationSummary* locations = instruction_->GetLocations();
227 DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
228 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
229
230 __ Bind(GetEntryLabel());
231 SaveLiveRegisters(codegen, locations);
232
233 InvokeRuntimeCallingConvention calling_convention;
234 __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
235 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
236 instruction_,
237 instruction_->GetDexPc(),
238 this);
239 CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
240 Primitive::Type type = instruction_->GetType();
241 mips64_codegen->MoveLocation(locations->Out(),
242 calling_convention.GetReturnLocation(type),
243 type);
244
245 RestoreLiveRegisters(codegen, locations);
246 __ B(GetExitLabel());
247 }
248
249 private:
250 HLoadString* const instruction_;
251
252 DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
253 };
254
255 class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
256 public:
257 explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}
258
259 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
260 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
261 __ Bind(GetEntryLabel());
262 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
263 instruction_,
264 instruction_->GetDexPc(),
265 this);
266 CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
267 }
268
269 private:
270 HNullCheck* const instruction_;
271
272 DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
273 };
274
275 class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
276 public:
277 explicit SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction,
278 HBasicBlock* successor)
279 : instruction_(instruction), successor_(successor) {}
280
281 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
282 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
283 __ Bind(GetEntryLabel());
284 SaveLiveRegisters(codegen, instruction_->GetLocations());
285 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
286 instruction_,
287 instruction_->GetDexPc(),
288 this);
289 CheckEntrypointTypes<kQuickTestSuspend, void, void>();
290 RestoreLiveRegisters(codegen, instruction_->GetLocations());
291 if (successor_ == nullptr) {
292 __ B(GetReturnLabel());
293 } else {
294 __ B(mips64_codegen->GetLabelOf(successor_));
295 }
296 }
297
298 Label* GetReturnLabel() {
299 DCHECK(successor_ == nullptr);
300 return &return_label_;
301 }
302
303 private:
304 HSuspendCheck* const instruction_;
305 // If not null, the block to branch to after the suspend check.
306 HBasicBlock* const successor_;
307
308 // If `successor_` is null, the label to branch to after the suspend check.
309 Label return_label_;
310
311 DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
312 };
313
314 class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
315 public:
316 TypeCheckSlowPathMIPS64(HInstruction* instruction,
317 Location class_to_check,
318 Location object_class,
319 uint32_t dex_pc)
320 : instruction_(instruction),
321 class_to_check_(class_to_check),
322 object_class_(object_class),
323 dex_pc_(dex_pc) {}
324
325 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
326 LocationSummary* locations = instruction_->GetLocations();
327 DCHECK(instruction_->IsCheckCast()
328 || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
329 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
330
331 __ Bind(GetEntryLabel());
332 SaveLiveRegisters(codegen, locations);
333
334 // We're moving two locations to locations that could overlap, so we need a parallel
335 // move resolver.
336 InvokeRuntimeCallingConvention calling_convention;
337 codegen->EmitParallelMoves(class_to_check_,
338 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
339 Primitive::kPrimNot,
340 object_class_,
341 Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
342 Primitive::kPrimNot);
343
344 if (instruction_->IsInstanceOf()) {
345 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
346 instruction_,
347 dex_pc_,
348 this);
349 Primitive::Type ret_type = instruction_->GetType();
350 Location ret_loc = calling_convention.GetReturnLocation(ret_type);
351 mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
352 CheckEntrypointTypes<kQuickInstanceofNonTrivial,
353 uint32_t,
354 const mirror::Class*,
355 const mirror::Class*>();
356 } else {
357 DCHECK(instruction_->IsCheckCast());
358 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
359 CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
360 }
361
362 RestoreLiveRegisters(codegen, locations);
363 __ B(GetExitLabel());
364 }
365
366 private:
367 HInstruction* const instruction_;
368 const Location class_to_check_;
369 const Location object_class_;
370 uint32_t dex_pc_;
371
372 DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
373 };
374
375 class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
376 public:
377 explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
378 : instruction_(instruction) {}
379
380 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
381 __ Bind(GetEntryLabel());
382 SaveLiveRegisters(codegen, instruction_->GetLocations());
383 DCHECK(instruction_->IsDeoptimize());
384 HDeoptimize* deoptimize = instruction_->AsDeoptimize();
385 uint32_t dex_pc = deoptimize->GetDexPc();
386 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
387 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
388 }
389
390 private:
391 HInstruction* const instruction_;
392 DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
393 };
394
395 CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
396 const Mips64InstructionSetFeatures& isa_features,
397 const CompilerOptions& compiler_options)
398 : CodeGenerator(graph,
399 kNumberOfGpuRegisters,
400 kNumberOfFpuRegisters,
401 0, // kNumberOfRegisterPairs
402 ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
403 arraysize(kCoreCalleeSaves)),
404 ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
405 arraysize(kFpuCalleeSaves)),
406 compiler_options),
407 block_labels_(graph->GetArena(), 0),
408 location_builder_(graph, this),
409 instruction_visitor_(graph, this),
410 move_resolver_(graph->GetArena(), this),
411 isa_features_(isa_features) {
412 // Save RA (containing the return address) to mimic Quick.
413 AddAllocatedRegister(Location::RegisterLocation(RA));
414 }
415
416 #undef __
417 #define __ down_cast<Mips64Assembler*>(GetAssembler())->
418 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()
419
420 void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
421 CodeGenerator::Finalize(allocator);
422 }
423
424 Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
425 return codegen_->GetAssembler();
426 }
427
428 void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
429 MoveOperands* move = moves_.Get(index);
430 codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
431 }
432
433 void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
434 MoveOperands* move = moves_.Get(index);
435 codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
436 }
437
438 void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
439 // Pop reg
440 __ Ld(GpuRegister(reg), SP, 0);
441 __ DecreaseFrameSize(kMips64WordSize);
442 }
443
444 void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
445 // Push reg
446 __ IncreaseFrameSize(kMips64WordSize);
447 __ Sd(GpuRegister(reg), SP, 0);
448 }
449
450 void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
451 LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
452 StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
453 // Allocate a scratch register other than TMP, if available.
454 // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
455 // automatically unspilled when the scratch scope object is destroyed).
456 ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
457 // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
458 int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
459 __ LoadFromOffset(load_type,
460 GpuRegister(ensure_scratch.GetRegister()),
461 SP,
462 index1 + stack_offset);
463 __ LoadFromOffset(load_type,
464 TMP,
465 SP,
466 index2 + stack_offset);
467 __ StoreToOffset(store_type,
468 GpuRegister(ensure_scratch.GetRegister()),
469 SP,
470 index2 + stack_offset);
471 __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
472 }
473
474 static dwarf::Reg DWARFReg(GpuRegister reg) {
475 return dwarf::Reg::Mips64Core(static_cast<int>(reg));
476 }
477
478 // TODO: mapping of floating-point registers to DWARF
479
480 void CodeGeneratorMIPS64::GenerateFrameEntry() {
481 __ Bind(&frame_entry_label_);
482
483 bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
484
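  // The overflow check probes the lowest address this frame may touch: the load targets the
  // ZERO register, so it has no visible effect unless the address falls into the protected
  // stack-guard region, in which case it faults; RecordPcInfo notes the probe's PC for the runtime.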
485 if (do_overflow_check) {
486 __ LoadFromOffset(kLoadWord,
487 ZERO,
488 SP,
489 -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
490 RecordPcInfo(nullptr, 0);
491 }
492
493 // TODO: anything related to T9/GP/GOT/PIC/.so's?
494
495 if (HasEmptyFrame()) {
496 return;
497 }
498
499 // Make sure the frame size isn't unreasonably large. Per the various APIs
500 // it looks like it should always be less than 2GB in size, which allows
501 // us to use 32-bit signed offsets from the stack pointer.
502 if (GetFrameSize() > 0x7FFFFFFF)
503 LOG(FATAL) << "Stack frame larger than 2GB";
504
505 // Spill callee-saved registers.
506 // Note that their cumulative size is small and they can be indexed using
507 // 16-bit offsets.
508
509 // TODO: increment/decrement SP in one step instead of two or remove this comment.
510
511 uint32_t ofs = FrameEntrySpillSize();
512 __ IncreaseFrameSize(ofs);
513
514 for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
515 GpuRegister reg = kCoreCalleeSaves[i];
516 if (allocated_registers_.ContainsCoreRegister(reg)) {
517 ofs -= kMips64WordSize;
518 __ Sd(reg, SP, ofs);
519 __ cfi().RelOffset(DWARFReg(reg), ofs);
520 }
521 }
522
523 for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
524 FpuRegister reg = kFpuCalleeSaves[i];
525 if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
526 ofs -= kMips64WordSize;
527 __ Sdc1(reg, SP, ofs);
528 // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
529 }
530 }
531
532 // Allocate the rest of the frame and store the current method pointer
533 // at its end.
534
535 __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
536
537 static_assert(IsInt<16>(kCurrentMethodStackOffset),
538 "kCurrentMethodStackOffset must fit into int16_t");
539 __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
540 }
541
542 void CodeGeneratorMIPS64::GenerateFrameExit() {
543 __ cfi().RememberState();
544
545 // TODO: anything related to T9/GP/GOT/PIC/.so's?
546
547 if (!HasEmptyFrame()) {
548 // Deallocate the rest of the frame.
549
550 __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
551
552 // Restore callee-saved registers.
553 // Note that their cumulative size is small and they can be indexed using
554 // 16-bit offsets.
555
556 // TODO: increment/decrement SP in one step instead of two or remove this comment.
557
558 uint32_t ofs = 0;
559
560 for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
561 FpuRegister reg = kFpuCalleeSaves[i];
562 if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
563 __ Ldc1(reg, SP, ofs);
564 ofs += kMips64WordSize;
565 // TODO: __ cfi().Restore(DWARFReg(reg));
566 }
567 }
568
569 for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
570 GpuRegister reg = kCoreCalleeSaves[i];
571 if (allocated_registers_.ContainsCoreRegister(reg)) {
572 __ Ld(reg, SP, ofs);
573 ofs += kMips64WordSize;
574 __ cfi().Restore(DWARFReg(reg));
575 }
576 }
577
578 DCHECK_EQ(ofs, FrameEntrySpillSize());
579 __ DecreaseFrameSize(ofs);
580 }
581
582 __ Jr(RA);
583
584 __ cfi().RestoreState();
585 __ cfi().DefCFAOffset(GetFrameSize());
586 }
587
588 void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
589 __ Bind(GetLabelOf(block));
590 }
591
592 void CodeGeneratorMIPS64::MoveLocation(Location destination,
593 Location source,
594 Primitive::Type type) {
595 if (source.Equals(destination)) {
596 return;
597 }
598
599 // A valid move can always be inferred from the destination and source
600 // locations. When moving from and to a register, the argument type can be
601 // used to generate 32bit instead of 64bit moves.
602 bool unspecified_type = (type == Primitive::kPrimVoid);
603 DCHECK_EQ(unspecified_type, false);
604
605 if (destination.IsRegister() || destination.IsFpuRegister()) {
606 if (unspecified_type) {
607 HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
608 if (source.IsStackSlot() ||
609 (src_cst != nullptr && (src_cst->IsIntConstant()
610 || src_cst->IsFloatConstant()
611 || src_cst->IsNullConstant()))) {
612 // For stack slots and 32bit constants, a 32bit type is appropriate.
613 type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
614 } else {
615 // If the source is a double stack slot or a 64bit constant, a 64bit
616 // type is appropriate. Else the source is a register, and since the
617 // type has not been specified, we choose a 64bit type to force a 64bit
618 // move.
619 type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
620 }
621 }
622 DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
623 (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
624 if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
625 // Move to GPR/FPR from stack
626 LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
627 if (Primitive::IsFloatingPointType(type)) {
628 __ LoadFpuFromOffset(load_type,
629 destination.AsFpuRegister<FpuRegister>(),
630 SP,
631 source.GetStackIndex());
632 } else {
633 // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
634 __ LoadFromOffset(load_type,
635 destination.AsRegister<GpuRegister>(),
636 SP,
637 source.GetStackIndex());
638 }
639 } else if (source.IsConstant()) {
640 // Move to GPR/FPR from constant
641 GpuRegister gpr = AT;
642 if (!Primitive::IsFloatingPointType(type)) {
643 gpr = destination.AsRegister<GpuRegister>();
644 }
645 if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
646 __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
647 } else {
648 __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
649 }
650 if (type == Primitive::kPrimFloat) {
651 __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
652 } else if (type == Primitive::kPrimDouble) {
653 __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
654 }
655 } else {
656 if (destination.IsRegister()) {
657 // Move to GPR from GPR
658 __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
659 } else {
660 // Move to FPR from FPR
661 if (type == Primitive::kPrimFloat) {
662 __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
663 } else {
664 DCHECK_EQ(type, Primitive::kPrimDouble);
665 __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
666 }
667 }
668 }
669 } else { // The destination is not a register. It must be a stack slot.
670 DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
671 if (source.IsRegister() || source.IsFpuRegister()) {
672 if (unspecified_type) {
673 if (source.IsRegister()) {
674 type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
675 } else {
676 type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
677 }
678 }
679 DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
680 (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
681 // Move to stack from GPR/FPR
682 StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
683 if (source.IsRegister()) {
684 __ StoreToOffset(store_type,
685 source.AsRegister<GpuRegister>(),
686 SP,
687 destination.GetStackIndex());
688 } else {
689 __ StoreFpuToOffset(store_type,
690 source.AsFpuRegister<FpuRegister>(),
691 SP,
692 destination.GetStackIndex());
693 }
694 } else if (source.IsConstant()) {
695 // Move to stack from constant
696 HConstant* src_cst = source.GetConstant();
697 StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
698 if (destination.IsStackSlot()) {
699 __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
700 } else {
701 __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
702 }
703 __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
704 } else {
705 DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
706 DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
707 // Move to stack from stack
708 if (destination.IsStackSlot()) {
709 __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
710 __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
711 } else {
712 __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
713 __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
714 }
715 }
716 }
717 }
718
719 void CodeGeneratorMIPS64::SwapLocations(Location loc1,
720 Location loc2,
721 Primitive::Type type ATTRIBUTE_UNUSED) {
722 DCHECK(!loc1.IsConstant());
723 DCHECK(!loc2.IsConstant());
724
725 if (loc1.Equals(loc2)) {
726 return;
727 }
728
729 bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
730 bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
731 bool is_fp_reg1 = loc1.IsFpuRegister();
732 bool is_fp_reg2 = loc2.IsFpuRegister();
733
734 if (loc2.IsRegister() && loc1.IsRegister()) {
735 // Swap 2 GPRs
736 GpuRegister r1 = loc1.AsRegister<GpuRegister>();
737 GpuRegister r2 = loc2.AsRegister<GpuRegister>();
738 __ Move(TMP, r2);
739 __ Move(r2, r1);
740 __ Move(r1, TMP);
741 } else if (is_fp_reg2 && is_fp_reg1) {
742 // Swap 2 FPRs
743 FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
744 FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
745 // TODO: Can MOV.S/MOV.D be used here to save one instruction?
746 // Need to distinguish float from double, right?
747 __ Dmfc1(TMP, r2);
748 __ Dmfc1(AT, r1);
749 __ Dmtc1(TMP, r1);
750 __ Dmtc1(AT, r2);
751 } else if (is_slot1 != is_slot2) {
752 // Swap GPR/FPR and stack slot
753 Location reg_loc = is_slot1 ? loc2 : loc1;
754 Location mem_loc = is_slot1 ? loc1 : loc2;
755 LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
756 StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
757 // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
758 __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
759 if (reg_loc.IsFpuRegister()) {
760 __ StoreFpuToOffset(store_type,
761 reg_loc.AsFpuRegister<FpuRegister>(),
762 SP,
763 mem_loc.GetStackIndex());
764 // TODO: review this MTC1/DMTC1 move
765 if (mem_loc.IsStackSlot()) {
766 __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
767 } else {
768 DCHECK(mem_loc.IsDoubleStackSlot());
769 __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
770 }
771 } else {
772 __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
773 __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
774 }
775 } else if (is_slot1 && is_slot2) {
776 move_resolver_.Exchange(loc1.GetStackIndex(),
777 loc2.GetStackIndex(),
778 loc1.IsDoubleStackSlot());
779 } else {
780 LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
781 }
782 }
783
784 void CodeGeneratorMIPS64::Move(HInstruction* instruction,
785 Location location,
786 HInstruction* move_for) {
787 LocationSummary* locations = instruction->GetLocations();
788 if (locations != nullptr && locations->Out().Equals(location)) {
789 return;
790 }
791
792 Primitive::Type type = instruction->GetType();
793 DCHECK_NE(type, Primitive::kPrimVoid);
794
795 if (instruction->IsIntConstant()
796 || instruction->IsLongConstant()
797 || instruction->IsNullConstant()) {
798 if (location.IsRegister()) {
799 // Move to GPR from constant
800 GpuRegister dst = location.AsRegister<GpuRegister>();
801 if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
802 __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
803 } else {
804 __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
805 }
806 } else {
807 DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
808 // Move to stack from constant
809 if (location.IsStackSlot()) {
810 __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
811 __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
812 } else {
813 __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
814 __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
815 }
816 }
817 } else if (instruction->IsTemporary()) {
818 Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
819 MoveLocation(location, temp_location, type);
820 } else if (instruction->IsLoadLocal()) {
821 uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
822 if (Primitive::Is64BitType(type)) {
823 MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
824 } else {
825 MoveLocation(location, Location::StackSlot(stack_slot), type);
826 }
827 } else {
828 DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
829 MoveLocation(location, locations->Out(), type);
830 }
831 }
832
833 Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
834 Primitive::Type type = load->GetType();
835
836 switch (type) {
837 case Primitive::kPrimNot:
838 case Primitive::kPrimInt:
839 case Primitive::kPrimFloat:
840 return Location::StackSlot(GetStackSlot(load->GetLocal()));
841
842 case Primitive::kPrimLong:
843 case Primitive::kPrimDouble:
844 return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
845
846 case Primitive::kPrimBoolean:
847 case Primitive::kPrimByte:
848 case Primitive::kPrimChar:
849 case Primitive::kPrimShort:
850 case Primitive::kPrimVoid:
851 LOG(FATAL) << "Unexpected type " << type;
852 }
853
854 LOG(FATAL) << "Unreachable";
855 return Location::NoLocation();
856 }
857
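// Write barrier: after a reference store into `object`, dirty the card covering it. The card
// address is card_table_base + (object >> kCardShift), and the byte written is the low byte of
// the card-table base held in `card`. The barrier is skipped when the stored `value` is null.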
858 void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
859 Label done;
860 GpuRegister card = AT;
861 GpuRegister temp = TMP;
862 __ Beqzc(value, &done);
863 __ LoadFromOffset(kLoadDoubleword,
864 card,
865 TR,
866 Thread::CardTableOffset<kMips64WordSize>().Int32Value());
867 __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
868 __ Daddu(temp, card, temp);
869 __ Sb(card, temp, 0);
870 __ Bind(&done);
871 }
872
873 void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
874 // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
875 blocked_core_registers_[ZERO] = true;
876 blocked_core_registers_[K0] = true;
877 blocked_core_registers_[K1] = true;
878 blocked_core_registers_[GP] = true;
879 blocked_core_registers_[SP] = true;
880 blocked_core_registers_[RA] = true;
881
882 // AT and TMP(T8) are used as temporary/scratch registers
883 // (similar to how AT is used by MIPS assemblers).
884 blocked_core_registers_[AT] = true;
885 blocked_core_registers_[TMP] = true;
886 blocked_fpu_registers_[FTMP] = true;
887
888 // Reserve suspend and thread registers.
889 blocked_core_registers_[S0] = true;
890 blocked_core_registers_[TR] = true;
891
892 // Reserve T9 for function calls
893 blocked_core_registers_[T9] = true;
894
895 // TODO: review; anything else?
896
897 // TODO: make these two for's conditional on is_baseline once
898 // all the issues with register saving/restoring are sorted out.
899 for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
900 blocked_core_registers_[kCoreCalleeSaves[i]] = true;
901 }
902
903 for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
904 blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
905 }
906 }
907
908 Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
909 if (type == Primitive::kPrimVoid) {
910 LOG(FATAL) << "Unreachable type " << type;
911 }
912
913 if (Primitive::IsFloatingPointType(type)) {
914 size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
915 return Location::FpuRegisterLocation(reg);
916 } else {
917 size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
918 return Location::RegisterLocation(reg);
919 }
920 }
921
922 size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
923 __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
924 return kMips64WordSize;
925 }
926
927 size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
928 __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
929 return kMips64WordSize;
930 }
931
932 size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
933 __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
934 return kMips64WordSize;
935 }
936
937 size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
938 __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
939 return kMips64WordSize;
940 }
941
942 void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
943 stream << Mips64ManagedRegister::FromGpuRegister(GpuRegister(reg));
944 }
945
946 void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
947 stream << Mips64ManagedRegister::FromFpuRegister(FpuRegister(reg));
948 }
949
950 void CodeGeneratorMIPS64::LoadCurrentMethod(GpuRegister current_method) {
951 DCHECK(RequiresCurrentMethod());
952 __ Ld(current_method, SP, kCurrentMethodStackOffset);
953 }
954
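// Runtime calls are dispatched through the entrypoint table in the Thread object: the target is
// loaded from TR + entry_point_offset into T9 (the customary MIPS call register) and invoked
// indirectly, then the PC is recorded so the runtime can map the call site back to `dex_pc`.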
955 void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
956 HInstruction* instruction,
957 uint32_t dex_pc,
958 SlowPathCode* slow_path) {
959 // TODO: anything related to T9/GP/GOT/PIC/.so's?
960 __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
961 __ Jalr(T9);
962 RecordPcInfo(instruction, dex_pc, slow_path);
963 DCHECK(instruction->IsSuspendCheck()
964 || instruction->IsBoundsCheck()
965 || instruction->IsNullCheck()
966 || instruction->IsDivZeroCheck()
967 || !IsLeafMethod());
968 }
969
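// Class-initialization check: read the class status word and take the slow path while it is
// still below kStatusInitialized, i.e. while the class may need its static initializer run.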
970 void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
971 GpuRegister class_reg) {
972 __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
973 __ LoadConst32(AT, mirror::Class::kStatusInitialized);
974 __ Bltc(TMP, AT, slow_path->GetEntryLabel());
975 // TODO: barrier needed?
976 __ Bind(slow_path->GetExitLabel());
977 }
978
979 void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
980 __ Sync(0); // only stype 0 is supported
981 }
982
983 void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
984 HBasicBlock* successor) {
985 SuspendCheckSlowPathMIPS64* slow_path =
986 new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
987 codegen_->AddSlowPath(slow_path);
988
989 __ LoadFromOffset(kLoadUnsignedHalfword,
990 TMP,
991 TR,
992 Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
993 if (successor == nullptr) {
994 __ Bnezc(TMP, slow_path->GetEntryLabel());
995 __ Bind(slow_path->GetReturnLabel());
996 } else {
997 __ Beqzc(TMP, codegen_->GetLabelOf(successor));
998 __ B(slow_path->GetEntryLabel());
999 // slow_path will return to GetLabelOf(successor).
1000 }
1001 }
1002
1003 InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
1004 CodeGeneratorMIPS64* codegen)
1005 : HGraphVisitor(graph),
1006 assembler_(codegen->GetAssembler()),
1007 codegen_(codegen) {}
1008
1009 void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
1010 DCHECK_EQ(instruction->InputCount(), 2U);
1011 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1012 Primitive::Type type = instruction->GetResultType();
1013 switch (type) {
1014 case Primitive::kPrimInt:
1015 case Primitive::kPrimLong: {
1016 locations->SetInAt(0, Location::RequiresRegister());
1017 HInstruction* right = instruction->InputAt(1);
1018 bool can_use_imm = false;
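      // Immediate forms: ANDI/ORI/XORI zero-extend a 16-bit immediate, ADDIU/DADDIU
      // sign-extend one, and subtraction is emitted as an add of the negated constant,
      // so the negated value must also fit in 16 signed bits.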
1019 if (right->IsConstant()) {
1020 int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
1021 if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
1022 can_use_imm = IsUint<16>(imm);
1023 } else if (instruction->IsAdd()) {
1024 can_use_imm = IsInt<16>(imm);
1025 } else {
1026 DCHECK(instruction->IsSub());
1027 can_use_imm = IsInt<16>(-imm);
1028 }
1029 }
1030 if (can_use_imm)
1031 locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
1032 else
1033 locations->SetInAt(1, Location::RequiresRegister());
1034 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1035 }
1036 break;
1037
1038 case Primitive::kPrimFloat:
1039 case Primitive::kPrimDouble:
1040 locations->SetInAt(0, Location::RequiresFpuRegister());
1041 locations->SetInAt(1, Location::RequiresFpuRegister());
1042 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1043 break;
1044
1045 default:
1046 LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
1047 }
1048 }
1049
1050 void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
1051 Primitive::Type type = instruction->GetType();
1052 LocationSummary* locations = instruction->GetLocations();
1053
1054 switch (type) {
1055 case Primitive::kPrimInt:
1056 case Primitive::kPrimLong: {
1057 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1058 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1059 Location rhs_location = locations->InAt(1);
1060
1061 GpuRegister rhs_reg = ZERO;
1062 int64_t rhs_imm = 0;
1063 bool use_imm = rhs_location.IsConstant();
1064 if (use_imm) {
1065 rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
1066 } else {
1067 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1068 }
1069
1070 if (instruction->IsAnd()) {
1071 if (use_imm)
1072 __ Andi(dst, lhs, rhs_imm);
1073 else
1074 __ And(dst, lhs, rhs_reg);
1075 } else if (instruction->IsOr()) {
1076 if (use_imm)
1077 __ Ori(dst, lhs, rhs_imm);
1078 else
1079 __ Or(dst, lhs, rhs_reg);
1080 } else if (instruction->IsXor()) {
1081 if (use_imm)
1082 __ Xori(dst, lhs, rhs_imm);
1083 else
1084 __ Xor(dst, lhs, rhs_reg);
1085 } else if (instruction->IsAdd()) {
1086 if (type == Primitive::kPrimInt) {
1087 if (use_imm)
1088 __ Addiu(dst, lhs, rhs_imm);
1089 else
1090 __ Addu(dst, lhs, rhs_reg);
1091 } else {
1092 if (use_imm)
1093 __ Daddiu(dst, lhs, rhs_imm);
1094 else
1095 __ Daddu(dst, lhs, rhs_reg);
1096 }
1097 } else {
1098 DCHECK(instruction->IsSub());
1099 if (type == Primitive::kPrimInt) {
1100 if (use_imm)
1101 __ Addiu(dst, lhs, -rhs_imm);
1102 else
1103 __ Subu(dst, lhs, rhs_reg);
1104 } else {
1105 if (use_imm)
1106 __ Daddiu(dst, lhs, -rhs_imm);
1107 else
1108 __ Dsubu(dst, lhs, rhs_reg);
1109 }
1110 }
1111 break;
1112 }
1113 case Primitive::kPrimFloat:
1114 case Primitive::kPrimDouble: {
1115 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
1116 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
1117 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
1118 if (instruction->IsAdd()) {
1119 if (type == Primitive::kPrimFloat)
1120 __ AddS(dst, lhs, rhs);
1121 else
1122 __ AddD(dst, lhs, rhs);
1123 } else if (instruction->IsSub()) {
1124 if (type == Primitive::kPrimFloat)
1125 __ SubS(dst, lhs, rhs);
1126 else
1127 __ SubD(dst, lhs, rhs);
1128 } else {
1129 LOG(FATAL) << "Unexpected floating-point binary operation";
1130 }
1131 break;
1132 }
1133 default:
1134 LOG(FATAL) << "Unexpected binary operation type " << type;
1135 }
1136 }
1137
1138 void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
1139 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1140
1141 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1142 Primitive::Type type = instr->GetResultType();
1143 switch (type) {
1144 case Primitive::kPrimInt:
1145 case Primitive::kPrimLong: {
1146 locations->SetInAt(0, Location::RequiresRegister());
1147 locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1148 locations->SetOut(Location::RequiresRegister());
1149 break;
1150 }
1151 default:
1152 LOG(FATAL) << "Unexpected shift type " << type;
1153 }
1154 }
1155
1156 void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
1157 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1158 LocationSummary* locations = instr->GetLocations();
1159 Primitive::Type type = instr->GetType();
1160
1161 switch (type) {
1162 case Primitive::kPrimInt:
1163 case Primitive::kPrimLong: {
1164 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1165 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1166 Location rhs_location = locations->InAt(1);
1167
1168 GpuRegister rhs_reg = ZERO;
1169 int64_t rhs_imm = 0;
1170 bool use_imm = rhs_location.IsConstant();
1171 if (use_imm) {
1172 rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
1173 } else {
1174 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1175 }
1176
1177 if (use_imm) {
1178 uint32_t shift_value = (type == Primitive::kPrimInt)
1179 ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
1180 : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);
1181
1182 if (type == Primitive::kPrimInt) {
1183 if (instr->IsShl()) {
1184 __ Sll(dst, lhs, shift_value);
1185 } else if (instr->IsShr()) {
1186 __ Sra(dst, lhs, shift_value);
1187 } else {
1188 __ Srl(dst, lhs, shift_value);
1189 }
1190 } else {
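        // 64-bit immediate shifts: DSLL/DSRA/DSRL encode only a 5-bit shift amount, so amounts
        // of 32-63 use the "+32" forms (DSLL32/DSRA32/DSRL32) with 32 subtracted from the amount.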
1191 if (shift_value < 32) {
1192 if (instr->IsShl()) {
1193 __ Dsll(dst, lhs, shift_value);
1194 } else if (instr->IsShr()) {
1195 __ Dsra(dst, lhs, shift_value);
1196 } else {
1197 __ Dsrl(dst, lhs, shift_value);
1198 }
1199 } else {
1200 shift_value -= 32;
1201 if (instr->IsShl()) {
1202 __ Dsll32(dst, lhs, shift_value);
1203 } else if (instr->IsShr()) {
1204 __ Dsra32(dst, lhs, shift_value);
1205 } else {
1206 __ Dsrl32(dst, lhs, shift_value);
1207 }
1208 }
1209 }
1210 } else {
1211 if (type == Primitive::kPrimInt) {
1212 if (instr->IsShl()) {
1213 __ Sllv(dst, lhs, rhs_reg);
1214 } else if (instr->IsShr()) {
1215 __ Srav(dst, lhs, rhs_reg);
1216 } else {
1217 __ Srlv(dst, lhs, rhs_reg);
1218 }
1219 } else {
1220 if (instr->IsShl()) {
1221 __ Dsllv(dst, lhs, rhs_reg);
1222 } else if (instr->IsShr()) {
1223 __ Dsrav(dst, lhs, rhs_reg);
1224 } else {
1225 __ Dsrlv(dst, lhs, rhs_reg);
1226 }
1227 }
1228 }
1229 break;
1230 }
1231 default:
1232 LOG(FATAL) << "Unexpected shift operation type " << type;
1233 }
1234 }
1235
1236 void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
1237 HandleBinaryOp(instruction);
1238 }
1239
1240 void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
1241 HandleBinaryOp(instruction);
1242 }
1243
1244 void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
1245 HandleBinaryOp(instruction);
1246 }
1247
1248 void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
1249 HandleBinaryOp(instruction);
1250 }
1251
1252 void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
1253 LocationSummary* locations =
1254 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1255 locations->SetInAt(0, Location::RequiresRegister());
1256 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1257 if (Primitive::IsFloatingPointType(instruction->GetType())) {
1258 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1259 } else {
1260 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1261 }
1262 }
1263
1264 void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
1265 LocationSummary* locations = instruction->GetLocations();
1266 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1267 Location index = locations->InAt(1);
1268 Primitive::Type type = instruction->GetType();
1269
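  // Element address = obj + data_offset + (index << log2(element size)); a constant index is
  // folded into the load's offset, while a register index is scaled and added into TMP first.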
1270 switch (type) {
1271 case Primitive::kPrimBoolean: {
1272 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1273 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1274 if (index.IsConstant()) {
1275 size_t offset =
1276 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1277 __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
1278 } else {
1279 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1280 __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
1281 }
1282 break;
1283 }
1284
1285 case Primitive::kPrimByte: {
1286 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
1287 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1288 if (index.IsConstant()) {
1289 size_t offset =
1290 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1291 __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
1292 } else {
1293 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1294 __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
1295 }
1296 break;
1297 }
1298
1299 case Primitive::kPrimShort: {
1300 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
1301 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1302 if (index.IsConstant()) {
1303 size_t offset =
1304 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1305 __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
1306 } else {
1307 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1308 __ Daddu(TMP, obj, TMP);
1309 __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
1310 }
1311 break;
1312 }
1313
1314 case Primitive::kPrimChar: {
1315 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1316 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1317 if (index.IsConstant()) {
1318 size_t offset =
1319 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1320 __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
1321 } else {
1322 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1323 __ Daddu(TMP, obj, TMP);
1324 __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
1325 }
1326 break;
1327 }
1328
1329 case Primitive::kPrimInt:
1330 case Primitive::kPrimNot: {
1331 DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
1332 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1333 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1334 LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
1335 if (index.IsConstant()) {
1336 size_t offset =
1337 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1338 __ LoadFromOffset(load_type, out, obj, offset);
1339 } else {
1340 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1341 __ Daddu(TMP, obj, TMP);
1342 __ LoadFromOffset(load_type, out, TMP, data_offset);
1343 }
1344 break;
1345 }
1346
1347 case Primitive::kPrimLong: {
1348 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
1349 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1350 if (index.IsConstant()) {
1351 size_t offset =
1352 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1353 __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
1354 } else {
1355 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1356 __ Daddu(TMP, obj, TMP);
1357 __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
1358 }
1359 break;
1360 }
1361
1362 case Primitive::kPrimFloat: {
1363 uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
1364 FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
1365 if (index.IsConstant()) {
1366 size_t offset =
1367 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1368 __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
1369 } else {
1370 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1371 __ Daddu(TMP, obj, TMP);
1372 __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
1373 }
1374 break;
1375 }
1376
1377 case Primitive::kPrimDouble: {
1378 uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
1379 FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
1380 if (index.IsConstant()) {
1381 size_t offset =
1382 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1383 __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
1384 } else {
1385 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1386 __ Daddu(TMP, obj, TMP);
1387 __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
1388 }
1389 break;
1390 }
1391
1392 case Primitive::kPrimVoid:
1393 LOG(FATAL) << "Unreachable type " << instruction->GetType();
1394 UNREACHABLE();
1395 }
1396 codegen_->MaybeRecordImplicitNullCheck(instruction);
1397 }
1398
1399 void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
1400 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1401 locations->SetInAt(0, Location::RequiresRegister());
1402 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1403 }
1404
1405 void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
1406 LocationSummary* locations = instruction->GetLocations();
1407 uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
1408 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1409 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1410 __ LoadFromOffset(kLoadWord, out, obj, offset);
1411 codegen_->MaybeRecordImplicitNullCheck(instruction);
1412 }
1413
1414 void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
1415 Primitive::Type value_type = instruction->GetComponentType();
1416 bool is_object = value_type == Primitive::kPrimNot;
1417 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1418 instruction,
1419 is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
1420 if (is_object) {
1421 InvokeRuntimeCallingConvention calling_convention;
1422 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
1423 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
1424 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
1425 } else {
1426 locations->SetInAt(0, Location::RequiresRegister());
1427 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1428 if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1429 locations->SetInAt(2, Location::RequiresFpuRegister());
1430 } else {
1431 locations->SetInAt(2, Location::RequiresRegister());
1432 }
1433 }
1434 }
1435
1436 void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
1437 LocationSummary* locations = instruction->GetLocations();
1438 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1439 Location index = locations->InAt(1);
1440 Primitive::Type value_type = instruction->GetComponentType();
1441 bool needs_runtime_call = locations->WillCall();
1442 bool needs_write_barrier =
1443 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
1444
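  // Reference stores either go through the pAputObject entrypoint (which also performs the
  // array store type check) or, when no runtime call is needed, store directly and then mark
  // the GC card via MarkGCCard.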
1445 switch (value_type) {
1446 case Primitive::kPrimBoolean:
1447 case Primitive::kPrimByte: {
1448 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1449 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1450 if (index.IsConstant()) {
1451 size_t offset =
1452 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1453 __ StoreToOffset(kStoreByte, value, obj, offset);
1454 } else {
1455 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1456 __ StoreToOffset(kStoreByte, value, TMP, data_offset);
1457 }
1458 break;
1459 }
1460
1461 case Primitive::kPrimShort:
1462 case Primitive::kPrimChar: {
1463 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1464 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1465 if (index.IsConstant()) {
1466 size_t offset =
1467 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1468 __ StoreToOffset(kStoreHalfword, value, obj, offset);
1469 } else {
1470 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1471 __ Daddu(TMP, obj, TMP);
1472 __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
1473 }
1474 break;
1475 }
1476
1477 case Primitive::kPrimInt:
1478 case Primitive::kPrimNot: {
1479 if (!needs_runtime_call) {
1480 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1481 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1482 if (index.IsConstant()) {
1483 size_t offset =
1484 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1485 __ StoreToOffset(kStoreWord, value, obj, offset);
1486 } else {
1487 DCHECK(index.IsRegister()) << index;
1488 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1489 __ Daddu(TMP, obj, TMP);
1490 __ StoreToOffset(kStoreWord, value, TMP, data_offset);
1491 }
1492 codegen_->MaybeRecordImplicitNullCheck(instruction);
1493 if (needs_write_barrier) {
1494 DCHECK_EQ(value_type, Primitive::kPrimNot);
1495 codegen_->MarkGCCard(obj, value);
1496 }
1497 } else {
1498 DCHECK_EQ(value_type, Primitive::kPrimNot);
1499 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
1500 instruction,
1501 instruction->GetDexPc(),
1502 nullptr);
1503 }
1504 break;
1505 }
1506
1507 case Primitive::kPrimLong: {
1508 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
1509 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1510 if (index.IsConstant()) {
1511 size_t offset =
1512 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1513 __ StoreToOffset(kStoreDoubleword, value, obj, offset);
1514 } else {
1515 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1516 __ Daddu(TMP, obj, TMP);
1517 __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
1518 }
1519 break;
1520 }
1521
1522 case Primitive::kPrimFloat: {
1523 uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
1524 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1525 DCHECK(locations->InAt(2).IsFpuRegister());
1526 if (index.IsConstant()) {
1527 size_t offset =
1528 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1529 __ StoreFpuToOffset(kStoreWord, value, obj, offset);
1530 } else {
1531 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1532 __ Daddu(TMP, obj, TMP);
1533 __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
1534 }
1535 break;
1536 }
1537
1538 case Primitive::kPrimDouble: {
1539 uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
1540 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1541 DCHECK(locations->InAt(2).IsFpuRegister());
1542 if (index.IsConstant()) {
1543 size_t offset =
1544 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1545 __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
1546 } else {
1547 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1548 __ Daddu(TMP, obj, TMP);
1549 __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
1550 }
1551 break;
1552 }
1553
1554 case Primitive::kPrimVoid:
1555 LOG(FATAL) << "Unreachable type " << instruction->GetType();
1556 UNREACHABLE();
1557 }
1558
1559 // For ints and objects the implicit null check was already recorded inside the switch.
1560 if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
1561 codegen_->MaybeRecordImplicitNullCheck(instruction);
1562 }
1563 }
1564
1565 void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
1566 LocationSummary* locations =
1567 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1568 locations->SetInAt(0, Location::RequiresRegister());
1569 locations->SetInAt(1, Location::RequiresRegister());
1570 if (instruction->HasUses()) {
1571 locations->SetOut(Location::SameAsFirstInput());
1572 }
1573 }
1574
1575 void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
1576 LocationSummary* locations = instruction->GetLocations();
1577 BoundsCheckSlowPathMIPS64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(
1578 instruction,
1579 locations->InAt(0),
1580 locations->InAt(1));
1581 codegen_->AddSlowPath(slow_path);
1582
1583 GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
1584 GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
1585
1586 // length is limited by the maximum positive signed 32-bit integer.
1587 // Unsigned comparison of length and index checks for index < 0
1588 // and for length <= index simultaneously.
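  // For example, with length == 5 an index of -1 sign-extends to a huge unsigned
  // value, so the single Bgeuc below sends both index == -1 and index == 5 to the
  // slow path.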
1589 // Mips R6 requires lhs != rhs for compact branches.
1590 if (index == length) {
1591 __ B(slow_path->GetEntryLabel());
1592 } else {
1593 __ Bgeuc(index, length, slow_path->GetEntryLabel());
1594 }
1595 }
1596
1597 void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
1598 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1599 instruction,
1600 LocationSummary::kCallOnSlowPath);
1601 locations->SetInAt(0, Location::RequiresRegister());
1602 locations->SetInAt(1, Location::RequiresRegister());
1603 locations->AddTemp(Location::RequiresRegister());
1604 }
1605
1606 void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
1607 LocationSummary* locations = instruction->GetLocations();
1608 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1609 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
1610 GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
1611
1612 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(
1613 instruction,
1614 locations->InAt(1),
1615 Location::RegisterLocation(obj_cls),
1616 instruction->GetDexPc());
1617 codegen_->AddSlowPath(slow_path);
1618
1619 // TODO: avoid this check if we know obj is not null.
1620 __ Beqzc(obj, slow_path->GetExitLabel());
1621 // Compare the class of `obj` with `cls`.
1622 __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
1623 __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
1624 __ Bind(slow_path->GetExitLabel());
1625 }
1626
1627 void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
1628 LocationSummary* locations =
1629 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1630 locations->SetInAt(0, Location::RequiresRegister());
1631 if (check->HasUses()) {
1632 locations->SetOut(Location::SameAsFirstInput());
1633 }
1634 }
1635
1636 void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
1637 // We assume the class is not null.
1638 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
1639 check->GetLoadClass(),
1640 check,
1641 check->GetDexPc(),
1642 true);
1643 codegen_->AddSlowPath(slow_path);
1644 GenerateClassInitializationCheck(slow_path,
1645 check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
1646 }
1647
1648 void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
1649 Primitive::Type in_type = compare->InputAt(0)->GetType();
1650
1651 LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
1652 ? LocationSummary::kCall
1653 : LocationSummary::kNoCall;
1654
1655 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
1656
1657 switch (in_type) {
1658 case Primitive::kPrimLong:
1659 locations->SetInAt(0, Location::RequiresRegister());
1660 locations->SetInAt(1, Location::RequiresRegister());
1661 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1662 break;
1663
1664 case Primitive::kPrimFloat:
1665 case Primitive::kPrimDouble: {
1666 InvokeRuntimeCallingConvention calling_convention;
1667 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1668 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
1669 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
1670 break;
1671 }
1672
1673 default:
1674 LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1675 }
1676 }
1677
1678 void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
1679 LocationSummary* locations = instruction->GetLocations();
1680 Primitive::Type in_type = instruction->InputAt(0)->GetType();
1681
1682 // 0 if: left == right
1683 // 1 if: left > right
1684 // -1 if: left < right
1685 switch (in_type) {
1686 case Primitive::kPrimLong: {
1687 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1688 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1689 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1690 // TODO: more efficient (direct) comparison with a constant
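      // The three-instruction sequence below computes the sign of (lhs - rhs)
      // without branches: TMP = (lhs < rhs), dst = (rhs < lhs), dst = dst - TMP,
      // which yields -1, 0 or +1 as described above.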
1691 __ Slt(TMP, lhs, rhs);
1692 __ Slt(dst, rhs, lhs);
1693 __ Subu(dst, dst, TMP);
1694 break;
1695 }
1696
1697 case Primitive::kPrimFloat:
1698 case Primitive::kPrimDouble: {
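      // The gt/lt bias only matters for NaN operands: the pCmpg* entrypoints return 1
      // when either input is NaN, while the pCmpl* entrypoints return -1 (Java
      // fcmpg/fcmpl semantics).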
1699 int32_t entry_point_offset;
1700 if (in_type == Primitive::kPrimFloat) {
1701 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
1702 : QUICK_ENTRY_POINT(pCmplFloat);
1703 } else {
1704 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
1705 : QUICK_ENTRY_POINT(pCmplDouble);
1706 }
1707 codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
1708 break;
1709 }
1710
1711 default:
1712 LOG(FATAL) << "Unimplemented compare type " << in_type;
1713 }
1714 }
1715
1716 void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
1717 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1718 locations->SetInAt(0, Location::RequiresRegister());
1719 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1720 if (instruction->NeedsMaterialization()) {
1721 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1722 }
1723 }
1724
1725 void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
1726 if (!instruction->NeedsMaterialization()) {
1727 return;
1728 }
1729
1730 LocationSummary* locations = instruction->GetLocations();
1731
1732 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1733 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1734 Location rhs_location = locations->InAt(1);
1735
1736 GpuRegister rhs_reg = ZERO;
1737 int64_t rhs_imm = 0;
1738 bool use_imm = rhs_location.IsConstant();
1739 if (use_imm) {
1740 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
1741 } else {
1742 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1743 }
1744
1745 IfCondition if_cond = instruction->GetCondition();
1746
1747 switch (if_cond) {
1748 case kCondEQ:
1749 case kCondNE:
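      // Materialize EQ/NE via XOR: dst = lhs ^ rhs is zero exactly when the operands
      // are equal; Sltiu(dst, dst, 1) below then yields EQ and Sltu(dst, ZERO, dst)
      // yields NE.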
1750 if (use_imm && IsUint<16>(rhs_imm)) {
1751 __ Xori(dst, lhs, rhs_imm);
1752 } else {
1753 if (use_imm) {
1754 rhs_reg = TMP;
1755 __ LoadConst32(rhs_reg, rhs_imm);
1756 }
1757 __ Xor(dst, lhs, rhs_reg);
1758 }
1759 if (if_cond == kCondEQ) {
1760 __ Sltiu(dst, dst, 1);
1761 } else {
1762 __ Sltu(dst, ZERO, dst);
1763 }
1764 break;
1765
1766 case kCondLT:
1767 case kCondGE:
1768 if (use_imm && IsInt<16>(rhs_imm)) {
1769 __ Slti(dst, lhs, rhs_imm);
1770 } else {
1771 if (use_imm) {
1772 rhs_reg = TMP;
1773 __ LoadConst32(rhs_reg, rhs_imm);
1774 }
1775 __ Slt(dst, lhs, rhs_reg);
1776 }
1777 if (if_cond == kCondGE) {
1778 // Simulate lhs >= rhs via !(lhs < rhs) since there's
1779 // only the slt instruction but no sge.
1780 __ Xori(dst, dst, 1);
1781 }
1782 break;
1783
1784 case kCondLE:
1785 case kCondGT:
1786 if (use_imm && IsInt<16>(rhs_imm + 1)) {
1787 // Simulate lhs <= rhs via lhs < rhs + 1.
1788 __ Slti(dst, lhs, rhs_imm + 1);
1789 if (if_cond == kCondGT) {
1790 // Simulate lhs > rhs via !(lhs <= rhs) since there's
1791 // only the slti instruction but no sgti.
1792 __ Xori(dst, dst, 1);
1793 }
1794 } else {
1795 if (use_imm) {
1796 rhs_reg = TMP;
1797 __ LoadConst32(rhs_reg, rhs_imm);
1798 }
1799 __ Slt(dst, rhs_reg, lhs);
1800 if (if_cond == kCondLE) {
1801 // Simulate lhs <= rhs via !(rhs < lhs) since there's
1802 // only the slt instruction but no sle.
1803 __ Xori(dst, dst, 1);
1804 }
1805 }
1806 break;
1807 }
1808 }
1809
1810 void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
1811 LocationSummary* locations =
1812 new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1813 switch (div->GetResultType()) {
1814 case Primitive::kPrimInt:
1815 case Primitive::kPrimLong:
1816 locations->SetInAt(0, Location::RequiresRegister());
1817 locations->SetInAt(1, Location::RequiresRegister());
1818 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1819 break;
1820
1821 case Primitive::kPrimFloat:
1822 case Primitive::kPrimDouble:
1823 locations->SetInAt(0, Location::RequiresFpuRegister());
1824 locations->SetInAt(1, Location::RequiresFpuRegister());
1825 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1826 break;
1827
1828 default:
1829 LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1830 }
1831 }
1832
1833 void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
1834 Primitive::Type type = instruction->GetType();
1835 LocationSummary* locations = instruction->GetLocations();
1836
1837 switch (type) {
1838 case Primitive::kPrimInt:
1839 case Primitive::kPrimLong: {
1840 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1841 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1842 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1843 if (type == Primitive::kPrimInt)
1844 __ DivR6(dst, lhs, rhs);
1845 else
1846 __ Ddiv(dst, lhs, rhs);
1847 break;
1848 }
1849 case Primitive::kPrimFloat:
1850 case Primitive::kPrimDouble: {
1851 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
1852 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
1853 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
1854 if (type == Primitive::kPrimFloat)
1855 __ DivS(dst, lhs, rhs);
1856 else
1857 __ DivD(dst, lhs, rhs);
1858 break;
1859 }
1860 default:
1861 LOG(FATAL) << "Unexpected div type " << type;
1862 }
1863 }
1864
1865 void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1866 LocationSummary* locations =
1867 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1868 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
1869 if (instruction->HasUses()) {
1870 locations->SetOut(Location::SameAsFirstInput());
1871 }
1872 }
1873
1874 void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1875 SlowPathCodeMIPS64* slow_path =
1876 new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
1877 codegen_->AddSlowPath(slow_path);
1878 Location value = instruction->GetLocations()->InAt(0);
1879
1880 Primitive::Type type = instruction->GetType();
1881
1882 if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
1883 LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
1884 }
1885
1886 if (value.IsConstant()) {
1887 int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
1888 if (divisor == 0) {
1889 __ B(slow_path->GetEntryLabel());
1890 } else {
1891 // A division by a non-zero constant is valid. We don't need to perform
1892 // any check, so simply fall through.
1893 }
1894 } else {
1895 __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
1896 }
1897 }
1898
1899 void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
1900 LocationSummary* locations =
1901 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1902 locations->SetOut(Location::ConstantLocation(constant));
1903 }
1904
1905 void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
1906 // Will be generated at use site.
1907 }
1908
1909 void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
1910 exit->SetLocations(nullptr);
1911 }
1912
1913 void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
1914 }
1915
1916 void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
1917 LocationSummary* locations =
1918 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1919 locations->SetOut(Location::ConstantLocation(constant));
1920 }
1921
1922 void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
1923 // Will be generated at use site.
1924 }
1925
1926 void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
1927 got->SetLocations(nullptr);
1928 }
1929
1930 void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
1931 HBasicBlock* successor = got->GetSuccessor();
1932 DCHECK(!successor->IsExitBlock());
1933 HBasicBlock* block = got->GetBlock();
1934 HInstruction* previous = got->GetPrevious();
1935 HLoopInformation* info = block->GetLoopInformation();
1936
1937 if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
1938 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
1939 GenerateSuspendCheck(info->GetSuspendCheck(), successor);
1940 return;
1941 }
1942 if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
1943 GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
1944 }
1945 if (!codegen_->GoesToNextBlock(block, successor)) {
1946 __ B(codegen_->GetLabelOf(successor));
1947 }
1948 }
1949
1950 void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
1951 Label* true_target,
1952 Label* false_target,
1953 Label* always_true_target) {
1954 HInstruction* cond = instruction->InputAt(0);
1955 HCondition* condition = cond->AsCondition();
1956
1957 if (cond->IsIntConstant()) {
1958 int32_t cond_value = cond->AsIntConstant()->GetValue();
1959 if (cond_value == 1) {
1960 if (always_true_target != nullptr) {
1961 __ B(always_true_target);
1962 }
1963 return;
1964 } else {
1965 DCHECK_EQ(cond_value, 0);
1966 }
1967 } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
1968 // The condition instruction has been materialized, compare the output to 0.
1969 Location cond_val = instruction->GetLocations()->InAt(0);
1970 DCHECK(cond_val.IsRegister());
1971 __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
1972 } else {
1973 // The condition instruction has not been materialized, use its inputs as
1974 // the comparison and its condition as the branch condition.
1975 GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
1976 Location rhs_location = condition->GetLocations()->InAt(1);
1977 GpuRegister rhs_reg = ZERO;
1978 int32_t rhs_imm = 0;
1979 bool use_imm = rhs_location.IsConstant();
1980 if (use_imm) {
1981 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
1982 } else {
1983 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1984 }
1985
1986 IfCondition if_cond = condition->GetCondition();
1987 if (use_imm && rhs_imm == 0) {
1988 switch (if_cond) {
1989 case kCondEQ:
1990 __ Beqzc(lhs, true_target);
1991 break;
1992 case kCondNE:
1993 __ Bnezc(lhs, true_target);
1994 break;
1995 case kCondLT:
1996 __ Bltzc(lhs, true_target);
1997 break;
1998 case kCondGE:
1999 __ Bgezc(lhs, true_target);
2000 break;
2001 case kCondLE:
2002 __ Blezc(lhs, true_target);
2003 break;
2004 case kCondGT:
2005 __ Bgtzc(lhs, true_target);
2006 break;
2007 }
2008 } else {
2009 if (use_imm) {
2010 rhs_reg = TMP;
2011 __ LoadConst32(rhs_reg, rhs_imm);
2012 }
2013 // It looks like we can get here with lhs == rhs. Should that be possible at all?
2014 // Mips R6 requires lhs != rhs for compact branches.
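        // When lhs == rhs the outcome is statically known, so emit an unconditional
        // branch (or nothing) instead of an invalid compact branch.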
2015 if (lhs == rhs_reg) {
2016 DCHECK(!use_imm);
2017 switch (if_cond) {
2018 case kCondEQ:
2019 case kCondGE:
2020 case kCondLE:
2021 // If lhs == rhs, a positive condition (EQ, GE, LE) always holds: branch unconditionally.
2022 __ B(true_target);
2023 break;
2024 case kCondNE:
2025 case kCondLT:
2026 case kCondGT:
2027 // If lhs == rhs, a negative condition (NE, LT, GT) never holds: fall through (no-op).
2028 break;
2029 }
2030 } else {
2031 switch (if_cond) {
2032 case kCondEQ:
2033 __ Beqc(lhs, rhs_reg, true_target);
2034 break;
2035 case kCondNE:
2036 __ Bnec(lhs, rhs_reg, true_target);
2037 break;
2038 case kCondLT:
2039 __ Bltc(lhs, rhs_reg, true_target);
2040 break;
2041 case kCondGE:
2042 __ Bgec(lhs, rhs_reg, true_target);
2043 break;
2044 case kCondLE:
2045 __ Bgec(rhs_reg, lhs, true_target);
2046 break;
2047 case kCondGT:
2048 __ Bltc(rhs_reg, lhs, true_target);
2049 break;
2050 }
2051 }
2052 }
2053 }
2054 if (false_target != nullptr) {
2055 __ B(false_target);
2056 }
2057 }
2058
2059 void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
2060 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2061 HInstruction* cond = if_instr->InputAt(0);
2062 if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
2063 locations->SetInAt(0, Location::RequiresRegister());
2064 }
2065 }
2066
2067 void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
2068 Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
2069 Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
2070 Label* always_true_target = true_target;
2071 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2072 if_instr->IfTrueSuccessor())) {
2073 always_true_target = nullptr;
2074 }
2075 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2076 if_instr->IfFalseSuccessor())) {
2077 false_target = nullptr;
2078 }
2079 GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
2080 }
2081
2082 void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2083 LocationSummary* locations = new (GetGraph()->GetArena())
2084 LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2085 HInstruction* cond = deoptimize->InputAt(0);
2086 DCHECK(cond->IsCondition());
2087 if (cond->AsCondition()->NeedsMaterialization()) {
2088 locations->SetInAt(0, Location::RequiresRegister());
2089 }
2090 }
2091
2092 void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2093 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
2094 DeoptimizationSlowPathMIPS64(deoptimize);
2095 codegen_->AddSlowPath(slow_path);
2096 Label* slow_path_entry = slow_path->GetEntryLabel();
2097 GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
2098 }
2099
2100 void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
2101 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2102 LocationSummary* locations =
2103 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2104 locations->SetInAt(0, Location::RequiresRegister());
2105 if (Primitive::IsFloatingPointType(instruction->GetType())) {
2106 locations->SetOut(Location::RequiresFpuRegister());
2107 } else {
2108 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2109 }
2110 }
2111
2112 void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
2113 const FieldInfo& field_info) {
2114 Primitive::Type type = field_info.GetFieldType();
2115 LocationSummary* locations = instruction->GetLocations();
2116 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2117 LoadOperandType load_type = kLoadUnsignedByte;
2118 switch (type) {
2119 case Primitive::kPrimBoolean:
2120 load_type = kLoadUnsignedByte;
2121 break;
2122 case Primitive::kPrimByte:
2123 load_type = kLoadSignedByte;
2124 break;
2125 case Primitive::kPrimShort:
2126 load_type = kLoadSignedHalfword;
2127 break;
2128 case Primitive::kPrimChar:
2129 load_type = kLoadUnsignedHalfword;
2130 break;
2131 case Primitive::kPrimInt:
2132 case Primitive::kPrimFloat:
2133 load_type = kLoadWord;
2134 break;
2135 case Primitive::kPrimLong:
2136 case Primitive::kPrimDouble:
2137 load_type = kLoadDoubleword;
2138 break;
2139 case Primitive::kPrimNot:
2140 load_type = kLoadUnsignedWord;
2141 break;
2142 case Primitive::kPrimVoid:
2143 LOG(FATAL) << "Unreachable type " << type;
2144 UNREACHABLE();
2145 }
2146 if (!Primitive::IsFloatingPointType(type)) {
2147 DCHECK(locations->Out().IsRegister());
2148 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2149 __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2150 } else {
2151 DCHECK(locations->Out().IsFpuRegister());
2152 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2153 __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2154 }
2155
2156 codegen_->MaybeRecordImplicitNullCheck(instruction);
2157 // TODO: memory barrier?
2158 }
2159
2160 void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
2161 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2162 LocationSummary* locations =
2163 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2164 locations->SetInAt(0, Location::RequiresRegister());
2165 if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
2166 locations->SetInAt(1, Location::RequiresFpuRegister());
2167 } else {
2168 locations->SetInAt(1, Location::RequiresRegister());
2169 }
2170 }
2171
2172 void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
2173 const FieldInfo& field_info) {
2174 Primitive::Type type = field_info.GetFieldType();
2175 LocationSummary* locations = instruction->GetLocations();
2176 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2177 StoreOperandType store_type = kStoreByte;
2178 switch (type) {
2179 case Primitive::kPrimBoolean:
2180 case Primitive::kPrimByte:
2181 store_type = kStoreByte;
2182 break;
2183 case Primitive::kPrimShort:
2184 case Primitive::kPrimChar:
2185 store_type = kStoreHalfword;
2186 break;
2187 case Primitive::kPrimInt:
2188 case Primitive::kPrimFloat:
2189 case Primitive::kPrimNot:
2190 store_type = kStoreWord;
2191 break;
2192 case Primitive::kPrimLong:
2193 case Primitive::kPrimDouble:
2194 store_type = kStoreDoubleword;
2195 break;
2196 case Primitive::kPrimVoid:
2197 LOG(FATAL) << "Unreachable type " << type;
2198 UNREACHABLE();
2199 }
2200 if (!Primitive::IsFloatingPointType(type)) {
2201 DCHECK(locations->InAt(1).IsRegister());
2202 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2203 __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2204 } else {
2205 DCHECK(locations->InAt(1).IsFpuRegister());
2206 FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
2207 __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2208 }
2209
2210 codegen_->MaybeRecordImplicitNullCheck(instruction);
2211 // TODO: memory barriers?
2212 if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
2213 DCHECK(locations->InAt(1).IsRegister());
2214 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2215 codegen_->MarkGCCard(obj, src);
2216 }
2217 }
2218
2219 void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2220 HandleFieldGet(instruction, instruction->GetFieldInfo());
2221 }
2222
2223 void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2224 HandleFieldGet(instruction, instruction->GetFieldInfo());
2225 }
2226
2227 void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2228 HandleFieldSet(instruction, instruction->GetFieldInfo());
2229 }
2230
2231 void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2232 HandleFieldSet(instruction, instruction->GetFieldInfo());
2233 }
2234
2235 void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2236 LocationSummary::CallKind call_kind =
2237 instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
2238 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2239 locations->SetInAt(0, Location::RequiresRegister());
2240 locations->SetInAt(1, Location::RequiresRegister());
2241 // The output does overlap the inputs.
2242 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2243 }
2244
2245 void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2246 LocationSummary* locations = instruction->GetLocations();
2247 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2248 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
2249 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2250
2251 Label done;
2252
2253 // Return 0 if `obj` is null.
2254 // TODO: Avoid this check if we know `obj` is not null.
2255 __ Move(out, ZERO);
2256 __ Beqzc(obj, &done);
2257
2258 // Compare the class of `obj` with `cls`.
2259 __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
2260 if (instruction->IsClassFinal()) {
2261 // Classes must be equal for the instanceof to succeed.
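    // out = ((out ^ cls) == 0), i.e. 1 when the classes match and 0 otherwise.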
2262 __ Xor(out, out, cls);
2263 __ Sltiu(out, out, 1);
2264 } else {
2265 // If the classes are not equal, we go into a slow path.
2266 DCHECK(locations->OnlyCallsOnSlowPath());
2267 SlowPathCodeMIPS64* slow_path =
2268 new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
2269 locations->InAt(1),
2270 locations->Out(),
2271 instruction->GetDexPc());
2272 codegen_->AddSlowPath(slow_path);
2273 __ Bnec(out, cls, slow_path->GetEntryLabel());
2274 __ LoadConst32(out, 1);
2275 __ Bind(slow_path->GetExitLabel());
2276 }
2277
2278 __ Bind(&done);
2279 }
2280
2281 void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
2282 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2283 locations->SetOut(Location::ConstantLocation(constant));
2284 }
2285
2286 void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
2287 // Will be generated at use site.
2288 }
2289
2290 void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
2291 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2292 locations->SetOut(Location::ConstantLocation(constant));
2293 }
2294
2295 void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
2296 // Will be generated at use site.
2297 }
2298
2299 void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
2300 LocationSummary* locations =
2301 new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
2302 locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
2303
2304 InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
2305 for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
2306 HInstruction* input = invoke->InputAt(i);
2307 locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
2308 }
2309
2310 Primitive::Type return_type = invoke->GetType();
2311 if (return_type != Primitive::kPrimVoid) {
2312 locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
2313 }
2314 }
2315
2316 void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2317 HandleInvoke(invoke);
2318 // The register T0 is required to be used for the hidden argument in
2319 // art_quick_imt_conflict_trampoline, so add the hidden argument.
2320 invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
2321 }
2322
2323 void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2324 // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
2325 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2326 uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
2327 invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
2328 Location receiver = invoke->GetLocations()->InAt(0);
2329 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2330 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2331
2332 // Set the hidden argument.
2333 __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
2334 invoke->GetDexMethodIndex());
2335
2336 // temp = object->GetClass();
2337 if (receiver.IsStackSlot()) {
2338 __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
2339 __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
2340 } else {
2341 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2342 }
2343 codegen_->MaybeRecordImplicitNullCheck(invoke);
2344 // temp = temp->GetImtEntryAt(method_offset);
2345 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2346 // T9 = temp->GetEntryPoint();
2347 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2348 // T9();
2349 __ Jalr(T9);
2350 DCHECK(!codegen_->IsLeafMethod());
2351 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2352 }
2353
2354 void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2355 // TODO intrinsic function
2356 HandleInvoke(invoke);
2357 }
2358
2359 void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2360 // When we do not run baseline, explicit clinit checks triggered by static
2361 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2362 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2363
2364 // TODO - intrinsic function
2365 HandleInvoke(invoke);
2366 }
2367
2368 static bool TryGenerateIntrinsicCode(HInvoke* invoke,
2369 CodeGeneratorMIPS64* codegen ATTRIBUTE_UNUSED) {
2370 if (invoke->GetLocations()->Intrinsified()) {
2371 // TODO - intrinsic function
2372 return true;
2373 }
2374 return false;
2375 }
2376
2377 void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
2378 GpuRegister temp) {
2379 // All registers are assumed to be correctly set up per the calling convention.
2380
2381 // TODO: Implement all kinds of calls:
2382 // 1) boot -> boot
2383 // 2) app -> boot
2384 // 3) app -> app
2385 //
2386 // Currently we only implement the app -> app logic, which looks the method up in the dex cache of resolved methods.
2387
2388 if (invoke->IsStringInit()) {
2389 // temp = thread->string_init_entrypoint
2390 __ LoadFromOffset(kLoadDoubleword,
2391 temp,
2392 TR,
2393 invoke->GetStringInitOffset());
2394 // T9 = temp->entry_point_from_quick_compiled_code_;
2395 __ LoadFromOffset(kLoadDoubleword,
2396 T9,
2397 temp,
2398 ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2399 kMips64WordSize).Int32Value());
2400 // T9()
2401 __ Jalr(T9);
2402 } else {
2403 // temp = method;
2404 LoadCurrentMethod(temp);
2405 if (!invoke->IsRecursive()) {
2406 // temp = temp->dex_cache_resolved_methods_;
2407 __ LoadFromOffset(kLoadUnsignedWord,
2408 temp,
2409 temp,
2410 ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
2411 // temp = temp[index_in_cache]
2412 __ LoadFromOffset(kLoadDoubleword,
2413 temp,
2414 temp,
2415 CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex()));
2416 // T9 = temp[offset_of_quick_compiled_code]
2417 __ LoadFromOffset(kLoadDoubleword,
2418 T9,
2419 temp,
2420 ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2421 kMips64WordSize).Int32Value());
2422 // T9()
2423 __ Jalr(T9);
2424 } else {
2425 __ Jalr(&frame_entry_label_, T9);
2426 }
2427 }
2428
2429 DCHECK(!IsLeafMethod());
2430 }
2431
2432 void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2433 // When we do not run baseline, explicit clinit checks triggered by static
2434 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2435 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2436
2437 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2438 return;
2439 }
2440
2441 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2442
2443 codegen_->GenerateStaticOrDirectCall(invoke, temp);
2444 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2445 }
2446
2447 void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2448 // TODO: Try to generate intrinsics code.
2449 LocationSummary* locations = invoke->GetLocations();
2450 Location receiver = locations->InAt(0);
2451 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2452 size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
2453 invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
2454 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2455 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2456
2457 // temp = object->GetClass();
2458 if (receiver.IsStackSlot()) {
2459 __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
2460 __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
2461 } else {
2462 DCHECK(receiver.IsRegister());
2463 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2464 }
2465 codegen_->MaybeRecordImplicitNullCheck(invoke);
2466 // temp = temp->GetMethodAt(method_offset);
2467 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2468 // T9 = temp->GetEntryPoint();
2469 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2470 // T9();
2471 __ Jalr(T9);
2472 DCHECK(!codegen_->IsLeafMethod());
2473 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2474 }
2475
2476 void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
2477 LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
2478 : LocationSummary::kNoCall;
2479 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
2480 locations->SetOut(Location::RequiresRegister());
2481 }
2482
2483 void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
2484 GpuRegister out = cls->GetLocations()->Out().AsRegister<GpuRegister>();
2485 if (cls->IsReferrersClass()) {
2486 DCHECK(!cls->CanCallRuntime());
2487 DCHECK(!cls->MustGenerateClinitCheck());
2488 codegen_->LoadCurrentMethod(out);
2489 __ LoadFromOffset(
2490 kLoadUnsignedWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
2491 } else {
2492 DCHECK(cls->CanCallRuntime());
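    // Load chain: current method -> dex cache of resolved types -> type at GetTypeIndex().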
2493 codegen_->LoadCurrentMethod(out);
2494 __ LoadFromOffset(
2495 kLoadUnsignedWord, out, out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
2496 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
2497 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
2498 cls,
2499 cls,
2500 cls->GetDexPc(),
2501 cls->MustGenerateClinitCheck());
2502 codegen_->AddSlowPath(slow_path);
2503 __ Beqzc(out, slow_path->GetEntryLabel());
2504 if (cls->MustGenerateClinitCheck()) {
2505 GenerateClassInitializationCheck(slow_path, out);
2506 } else {
2507 __ Bind(slow_path->GetExitLabel());
2508 }
2509 }
2510 }
2511
2512 void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
2513 LocationSummary* locations =
2514 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2515 locations->SetOut(Location::RequiresRegister());
2516 }
2517
2518 void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
2519 GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
2520 __ LoadFromOffset(kLoadUnsignedWord, out, TR, Thread::ExceptionOffset<kMips64WordSize>().Int32Value());
2521 __ StoreToOffset(kStoreWord, ZERO, TR, Thread::ExceptionOffset<kMips64WordSize>().Int32Value());
2522 }
2523
2524 void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
2525 load->SetLocations(nullptr);
2526 }
2527
2528 void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
2529 // Nothing to do, this is driven by the code generator.
2530 }
2531
2532 void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
2533 LocationSummary* locations =
2534 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2535 locations->SetOut(Location::RequiresRegister());
2536 }
2537
2538 void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
2539 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
2540 codegen_->AddSlowPath(slow_path);
2541
2542 GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
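  // Load chain: current method -> declaring class -> dex cache strings -> string at GetStringIndex().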
2543 codegen_->LoadCurrentMethod(out);
2544 __ LoadFromOffset(
2545 kLoadUnsignedWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
2546 __ LoadFromOffset(kLoadUnsignedWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
2547 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
2548 __ Beqzc(out, slow_path->GetEntryLabel());
2549 __ Bind(slow_path->GetExitLabel());
2550 }
2551
2552 void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
2553 local->SetLocations(nullptr);
2554 }
2555
2556 void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
2557 DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2558 }
2559
2560 void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
2561 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2562 locations->SetOut(Location::ConstantLocation(constant));
2563 }
2564
2565 void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
2566 // Will be generated at use site.
2567 }
2568
2569 void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2570 LocationSummary* locations =
2571 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2572 InvokeRuntimeCallingConvention calling_convention;
2573 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2574 }
2575
2576 void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2577 codegen_->InvokeRuntime(instruction->IsEnter()
2578 ? QUICK_ENTRY_POINT(pLockObject)
2579 : QUICK_ENTRY_POINT(pUnlockObject),
2580 instruction,
2581 instruction->GetDexPc(),
2582 nullptr);
2583 CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
2584 }
2585
2586 void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
2587 LocationSummary* locations =
2588 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2589 switch (mul->GetResultType()) {
2590 case Primitive::kPrimInt:
2591 case Primitive::kPrimLong:
2592 locations->SetInAt(0, Location::RequiresRegister());
2593 locations->SetInAt(1, Location::RequiresRegister());
2594 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2595 break;
2596
2597 case Primitive::kPrimFloat:
2598 case Primitive::kPrimDouble:
2599 locations->SetInAt(0, Location::RequiresFpuRegister());
2600 locations->SetInAt(1, Location::RequiresFpuRegister());
2601 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2602 break;
2603
2604 default:
2605 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2606 }
2607 }
2608
2609 void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
2610 Primitive::Type type = instruction->GetType();
2611 LocationSummary* locations = instruction->GetLocations();
2612
2613 switch (type) {
2614 case Primitive::kPrimInt:
2615 case Primitive::kPrimLong: {
2616 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2617 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
2618 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
2619 if (type == Primitive::kPrimInt)
2620 __ MulR6(dst, lhs, rhs);
2621 else
2622 __ Dmul(dst, lhs, rhs);
2623 break;
2624 }
2625 case Primitive::kPrimFloat:
2626 case Primitive::kPrimDouble: {
2627 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2628 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
2629 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
2630 if (type == Primitive::kPrimFloat)
2631 __ MulS(dst, lhs, rhs);
2632 else
2633 __ MulD(dst, lhs, rhs);
2634 break;
2635 }
2636 default:
2637 LOG(FATAL) << "Unexpected mul type " << type;
2638 }
2639 }
2640
2641 void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
2642 LocationSummary* locations =
2643 new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
2644 switch (neg->GetResultType()) {
2645 case Primitive::kPrimInt:
2646 case Primitive::kPrimLong:
2647 locations->SetInAt(0, Location::RequiresRegister());
2648 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2649 break;
2650
2651 case Primitive::kPrimFloat:
2652 case Primitive::kPrimDouble:
2653 locations->SetInAt(0, Location::RequiresFpuRegister());
2654 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2655 break;
2656
2657 default:
2658 LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2659 }
2660 }
2661
2662 void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
2663 Primitive::Type type = instruction->GetType();
2664 LocationSummary* locations = instruction->GetLocations();
2665
2666 switch (type) {
2667 case Primitive::kPrimInt:
2668 case Primitive::kPrimLong: {
2669 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2670 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
2671 if (type == Primitive::kPrimInt)
2672 __ Subu(dst, ZERO, src);
2673 else
2674 __ Dsubu(dst, ZERO, src);
2675 break;
2676 }
2677 case Primitive::kPrimFloat:
2678 case Primitive::kPrimDouble: {
2679 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2680 FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
2681 if (type == Primitive::kPrimFloat)
2682 __ NegS(dst, src);
2683 else
2684 __ NegD(dst, src);
2685 break;
2686 }
2687 default:
2688 LOG(FATAL) << "Unexpected neg type " << type;
2689 }
2690 }
2691
2692 void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
2693 LocationSummary* locations =
2694 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2695 InvokeRuntimeCallingConvention calling_convention;
2696 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2697 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
2698 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2699 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2700 }
2701
2702 void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
2703 InvokeRuntimeCallingConvention calling_convention;
2704 codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
2705 // Move a uint16_t value to a register.
2706 __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
2707 codegen_->InvokeRuntime(
2708 GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2709 instruction,
2710 instruction->GetDexPc(),
2711 nullptr);
2712 CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
2713 }
2714
2715 void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
2716 LocationSummary* locations =
2717 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2718 InvokeRuntimeCallingConvention calling_convention;
2719 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2720 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2721 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2722 }
2723
2724 void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
2725 InvokeRuntimeCallingConvention calling_convention;
2726 codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
2727 // Move a uint16_t value to a register.
2728 __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
2729 codegen_->InvokeRuntime(
2730 GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2731 instruction,
2732 instruction->GetDexPc(),
2733 nullptr);
2734 CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
2735 }
2736
2737 void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
2738 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2739 locations->SetInAt(0, Location::RequiresRegister());
2740 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2741 }
2742
2743 void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
2744 Primitive::Type type = instruction->GetType();
2745 LocationSummary* locations = instruction->GetLocations();
2746
2747 switch (type) {
2748 case Primitive::kPrimInt:
2749 case Primitive::kPrimLong: {
2750 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2751 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
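      // MIPS has no dedicated NOT instruction; nor dst, src, zero computes
      // ~(src | 0), which is ~src.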
2752 __ Nor(dst, src, ZERO);
2753 break;
2754 }
2755
2756 default:
2757 LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
2758 }
2759 }
2760
2761 void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
2762 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2763 locations->SetInAt(0, Location::RequiresRegister());
2764 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2765 }
2766
2767 void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
2768 LocationSummary* locations = instruction->GetLocations();
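  // The input is a boolean (0 or 1), so XOR-ing it with 1 negates it.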
2769 __ Xori(locations->Out().AsRegister<GpuRegister>(),
2770 locations->InAt(0).AsRegister<GpuRegister>(),
2771 1);
2772 }
2773
2774 void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
2775 LocationSummary* locations =
2776 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2777 locations->SetInAt(0, Location::RequiresRegister());
2778 if (instruction->HasUses()) {
2779 locations->SetOut(Location::SameAsFirstInput());
2780 }
2781 }
2782
2783 void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
2784 if (codegen_->CanMoveNullCheckToUser(instruction)) {
2785 return;
2786 }
2787 Location obj = instruction->GetLocations()->InAt(0);
2788
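  // Loading into the ZERO register discards the value; the only purpose of this
  // load is to fault when obj is null so that the fault handler can raise the
  // NullPointerException at the PC recorded below.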
2789 __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
2790 codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
2791 }
2792
2793 void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
2794 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
2795 codegen_->AddSlowPath(slow_path);
2796
2797 Location obj = instruction->GetLocations()->InAt(0);
2798
2799 __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
2800 }
2801
2802 void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
2803 if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
2804 GenerateImplicitNullCheck(instruction);
2805 } else {
2806 GenerateExplicitNullCheck(instruction);
2807 }
2808 }
2809
2810 void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
2811 HandleBinaryOp(instruction);
2812 }
2813
2814 void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
2815 HandleBinaryOp(instruction);
2816 }
2817
2818 void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
2819 LOG(FATAL) << "Unreachable";
2820 }
2821
2822 void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
2823 codegen_->GetMoveResolver()->EmitNativeCode(instruction);
2824 }
2825
2826 void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
2827 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2828 Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2829 if (location.IsStackSlot()) {
2830 location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2831 } else if (location.IsDoubleStackSlot()) {
2832 location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2833 }
2834 locations->SetOut(location);
2835 }
2836
2837 void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
2838 ATTRIBUTE_UNUSED) {
2839 // Nothing to do, the parameter is already at its location.
2840 }
2841
2842 void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
2843 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2844 for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2845 locations->SetInAt(i, Location::Any());
2846 }
2847 locations->SetOut(Location::Any());
2848 }
2849
VisitPhi(HPhi * instruction ATTRIBUTE_UNUSED)2850 void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
2851 LOG(FATAL) << "Unreachable";
2852 }
2853
void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt) {
        __ ModR6(dst, lhs, rhs);
      } else {
        __ Dmod(dst, lhs, rhs);
      }
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
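      // Floating-point remainder is computed by the fmodf/fmod runtime entry points.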
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
  Primitive::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, Mips64ReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);

  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
      (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  if (call_kind == LocationSummary::kNoCall) {
    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(0, Location::RequiresRegister());
    }

    if (Primitive::IsFloatingPointType(result_type)) {
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }
  } else {
    InvokeRuntimeCallingConvention calling_convention;

    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
    } else {
      locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    }

    locations->SetOut(calling_convention.GetReturnLocation(result_type));
  }
}

void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();

    switch (result_type) {
      case Primitive::kPrimChar:
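        // char is unsigned: conversions to char zero-extend the lower 16 bits.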
        __ Andi(dst, src, 0xFFFF);
        break;
      case Primitive::kPrimByte:
        // Long is never converted into types narrower than int directly, so
        // SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seb(dst, src);
        break;
      case Primitive::kPrimShort:
        // Long is never converted into types narrower than int directly, so
        // SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seh(dst, src);
        break;
      case Primitive::kPrimInt:
      case Primitive::kPrimLong:
        // Sign-extend the 32-bit value into bits 32 through 63 for both
        // int-to-long and long-to-int conversions.
        __ Sll(dst, src, 0);
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    if (input_type != Primitive::kPrimLong) {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
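      // Move the 32-bit integer into the FPU via FTMP, then convert the word
      // to single or double precision.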
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    } else {
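      // long-to-float and long-to-double conversions are handled by the
      // pL2f/pL2d runtime entry points.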
      int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
                                                                    : QUICK_ENTRY_POINT(pL2d);
      codegen_->InvokeRuntime(entry_offset,
                              conversion,
                              conversion->GetDexPc(),
                              nullptr);
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
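    // Floating-point to integral conversions go through the runtime; the entry
    // points truncate toward zero, as required by Java semantics.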
    int32_t entry_offset;
    if (result_type != Primitive::kPrimLong) {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
                                                           : QUICK_ENTRY_POINT(pD2iz);
    } else {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
                                                           : QUICK_ENTRY_POINT(pD2l);
    }
    codegen_->InvokeRuntime(entry_offset,
                            conversion,
                            conversion->GetDexPc(),
                            nullptr);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

}  // namespace mips64
}  // namespace art