1 /*
2  * Copyright (C) 2015 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "code_generator_mips64.h"
18 
19 #include "art_method.h"
20 #include "code_generator_utils.h"
21 #include "compiled_method.h"
22 #include "entrypoints/quick/quick_entrypoints.h"
23 #include "entrypoints/quick/quick_entrypoints_enum.h"
24 #include "gc/accounting/card_table.h"
25 #include "intrinsics.h"
26 #include "intrinsics_mips64.h"
27 #include "mirror/array-inl.h"
28 #include "mirror/class-inl.h"
29 #include "offsets.h"
30 #include "thread.h"
31 #include "utils/assembler.h"
32 #include "utils/mips64/assembler_mips64.h"
33 #include "utils/stack_checks.h"
34 
35 namespace art {
36 namespace mips64 {
37 
38 static constexpr int kCurrentMethodStackOffset = 0;
39 static constexpr GpuRegister kMethodRegisterArgument = A0;
40 
41 Location Mips64ReturnLocation(Primitive::Type return_type) {
42   switch (return_type) {
43     case Primitive::kPrimBoolean:
44     case Primitive::kPrimByte:
45     case Primitive::kPrimChar:
46     case Primitive::kPrimShort:
47     case Primitive::kPrimInt:
48     case Primitive::kPrimNot:
49     case Primitive::kPrimLong:
50       return Location::RegisterLocation(V0);
51 
52     case Primitive::kPrimFloat:
53     case Primitive::kPrimDouble:
54       return Location::FpuRegisterLocation(F0);
55 
56     case Primitive::kPrimVoid:
57       return Location();
58   }
59   UNREACHABLE();
60 }
61 
62 Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
63   return Mips64ReturnLocation(type);
64 }
65 
66 Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
67   return Location::RegisterLocation(kMethodRegisterArgument);
68 }
69 
70 Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
71   Location next_location;
72   if (type == Primitive::kPrimVoid) {
73     LOG(FATAL) << "Unexpected parameter type " << type;
74   }
75 
76   if (Primitive::IsFloatingPointType(type) &&
77       (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
78     next_location = Location::FpuRegisterLocation(
79         calling_convention.GetFpuRegisterAt(float_index_++));
80     gp_index_++;
81   } else if (!Primitive::IsFloatingPointType(type) &&
82              (gp_index_ < calling_convention.GetNumberOfRegisters())) {
83     next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
84     float_index_++;
85   } else {
86     size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
87     next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
88                                                  : Location::StackSlot(stack_offset);
89   }
90 
91   // Space on the stack is reserved for all arguments.
92   stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
93 
94   return next_location;
95 }
96 
97 Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
98   return Mips64ReturnLocation(type);
99 }
100 
101 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
102 #define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->  // NOLINT
103 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
104 
105 class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
106  public:
107   explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {}
108 
109   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
110     LocationSummary* locations = instruction_->GetLocations();
111     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
112     __ Bind(GetEntryLabel());
113     if (instruction_->CanThrowIntoCatchBlock()) {
114       // Live registers will be restored in the catch block if caught.
115       SaveLiveRegisters(codegen, instruction_->GetLocations());
116     }
117     // We're moving two locations to locations that could overlap, so we need a parallel
118     // move resolver.
119     InvokeRuntimeCallingConvention calling_convention;
120     codegen->EmitParallelMoves(locations->InAt(0),
121                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
122                                Primitive::kPrimInt,
123                                locations->InAt(1),
124                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
125                                Primitive::kPrimInt);
126     QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
127         ? kQuickThrowStringBounds
128         : kQuickThrowArrayBounds;
129     mips64_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
130     CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
131     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
132   }
133 
134   bool IsFatal() const OVERRIDE { return true; }
135 
136   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
137 
138  private:
139   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
140 };
141 
142 class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
143  public:
144   explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : SlowPathCodeMIPS64(instruction) {}
145 
146   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
147     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
148     __ Bind(GetEntryLabel());
149     mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
150     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
151   }
152 
153   bool IsFatal() const OVERRIDE { return true; }
154 
155   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
156 
157  private:
158   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
159 };
160 
161 class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
162  public:
163   LoadClassSlowPathMIPS64(HLoadClass* cls,
164                           HInstruction* at,
165                           uint32_t dex_pc,
166                           bool do_clinit)
167       : SlowPathCodeMIPS64(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
168     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
169   }
170 
171   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
172     LocationSummary* locations = instruction_->GetLocations();
173     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
174 
175     __ Bind(GetEntryLabel());
176     SaveLiveRegisters(codegen, locations);
177 
178     InvokeRuntimeCallingConvention calling_convention;
179     dex::TypeIndex type_index = cls_->GetTypeIndex();
180     __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
181     QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
182                                                 : kQuickInitializeType;
183     mips64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
184     if (do_clinit_) {
185       CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
186     } else {
187       CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
188     }
189 
190     // Move the class to the desired location.
191     Location out = locations->Out();
192     if (out.IsValid()) {
193       DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
194       Primitive::Type type = instruction_->GetType();
195       mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
196     }
197 
198     RestoreLiveRegisters(codegen, locations);
199     // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
200     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
201     if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
202       DCHECK(out.IsValid());
203       // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
204       // kSaveEverything and use a temporary for the .bss entry address in the fast path,
205       // so that we can avoid another calculation here.
206       DCHECK_NE(out.AsRegister<GpuRegister>(), AT);
207       CodeGeneratorMIPS64::PcRelativePatchInfo* info =
208           mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
209       mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info, AT);
210       __ Sw(out.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
211     }
212     __ Bc(GetExitLabel());
213   }
214 
215   const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }
216 
217  private:
218   // The class this slow path will load.
219   HLoadClass* const cls_;
220 
221   // The dex PC of `at`.
222   const uint32_t dex_pc_;
223 
224   // Whether to initialize the class.
225   const bool do_clinit_;
226 
227   DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
228 };
229 
230 class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
231  public:
232   explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : SlowPathCodeMIPS64(instruction) {}
233 
234   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
235     LocationSummary* locations = instruction_->GetLocations();
236     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
237     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
238 
239     __ Bind(GetEntryLabel());
240     SaveLiveRegisters(codegen, locations);
241 
242     InvokeRuntimeCallingConvention calling_convention;
243     HLoadString* load = instruction_->AsLoadString();
244     const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
245     __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
246     mips64_codegen->InvokeRuntime(kQuickResolveString,
247                                   instruction_,
248                                   instruction_->GetDexPc(),
249                                   this);
250     CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
251     Primitive::Type type = instruction_->GetType();
252     mips64_codegen->MoveLocation(locations->Out(),
253                                  calling_convention.GetReturnLocation(type),
254                                  type);
255 
256     RestoreLiveRegisters(codegen, locations);
257 
258     // Store the resolved String to the BSS entry.
259     // TODO: Change art_quick_resolve_string to kSaveEverything and use a temporary for the
260     // .bss entry address in the fast path, so that we can avoid another calculation here.
261     GpuRegister out = locations->Out().AsRegister<GpuRegister>();
262     DCHECK_NE(out, AT);
263     CodeGeneratorMIPS64::PcRelativePatchInfo* info =
264         mips64_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
265     mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info, AT);
266     __ Sw(out, AT, /* placeholder */ 0x5678);
267 
268     __ Bc(GetExitLabel());
269   }
270 
271   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
272 
273  private:
274   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
275 };
276 
277 class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
278  public:
279   explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {}
280 
281   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
282     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
283     __ Bind(GetEntryLabel());
284     if (instruction_->CanThrowIntoCatchBlock()) {
285       // Live registers will be restored in the catch block if caught.
286       SaveLiveRegisters(codegen, instruction_->GetLocations());
287     }
288     mips64_codegen->InvokeRuntime(kQuickThrowNullPointer,
289                                   instruction_,
290                                   instruction_->GetDexPc(),
291                                   this);
292     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
293   }
294 
295   bool IsFatal() const OVERRIDE { return true; }
296 
297   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
298 
299  private:
300   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
301 };
302 
303 class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
304  public:
305   SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
306       : SlowPathCodeMIPS64(instruction), successor_(successor) {}
307 
308   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
309     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
310     __ Bind(GetEntryLabel());
311     mips64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
312     CheckEntrypointTypes<kQuickTestSuspend, void, void>();
313     if (successor_ == nullptr) {
314       __ Bc(GetReturnLabel());
315     } else {
316       __ Bc(mips64_codegen->GetLabelOf(successor_));
317     }
318   }
319 
320   Mips64Label* GetReturnLabel() {
321     DCHECK(successor_ == nullptr);
322     return &return_label_;
323   }
324 
325   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
326 
327  private:
328   // If not null, the block to branch to after the suspend check.
329   HBasicBlock* const successor_;
330 
331   // If `successor_` is null, the label to branch to after the suspend check.
332   Mips64Label return_label_;
333 
334   DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
335 };
336 
337 class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
338  public:
339   explicit TypeCheckSlowPathMIPS64(HInstruction* instruction, bool is_fatal)
340       : SlowPathCodeMIPS64(instruction), is_fatal_(is_fatal) {}
341 
342   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
343     LocationSummary* locations = instruction_->GetLocations();
344 
345     uint32_t dex_pc = instruction_->GetDexPc();
346     DCHECK(instruction_->IsCheckCast()
347            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
348     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
349 
350     __ Bind(GetEntryLabel());
351     if (!is_fatal_) {
352       SaveLiveRegisters(codegen, locations);
353     }
354 
355     // We're moving two locations to locations that could overlap, so we need a parallel
356     // move resolver.
357     InvokeRuntimeCallingConvention calling_convention;
358     codegen->EmitParallelMoves(locations->InAt(0),
359                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
360                                Primitive::kPrimNot,
361                                locations->InAt(1),
362                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
363                                Primitive::kPrimNot);
364     if (instruction_->IsInstanceOf()) {
365       mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
366       CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
367       Primitive::Type ret_type = instruction_->GetType();
368       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
369       mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
370     } else {
371       DCHECK(instruction_->IsCheckCast());
372       mips64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
373       CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
374     }
375 
376     if (!is_fatal_) {
377       RestoreLiveRegisters(codegen, locations);
378       __ Bc(GetExitLabel());
379     }
380   }
381 
382   const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }
383 
384   bool IsFatal() const OVERRIDE { return is_fatal_; }
385 
386  private:
387   const bool is_fatal_;
388 
389   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
390 };
391 
392 class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
393  public:
394   explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
395     : SlowPathCodeMIPS64(instruction) {}
396 
397   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
398     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
399     __ Bind(GetEntryLabel());
400     LocationSummary* locations = instruction_->GetLocations();
401     SaveLiveRegisters(codegen, locations);
402     InvokeRuntimeCallingConvention calling_convention;
403     __ LoadConst32(calling_convention.GetRegisterAt(0),
404                    static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
405     mips64_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
406     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
407   }
408 
409   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
410 
411  private:
412   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
413 };
414 
415 class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 {
416  public:
417   explicit ArraySetSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}
418 
419   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
420     LocationSummary* locations = instruction_->GetLocations();
421     __ Bind(GetEntryLabel());
422     SaveLiveRegisters(codegen, locations);
423 
424     InvokeRuntimeCallingConvention calling_convention;
425     HParallelMove parallel_move(codegen->GetGraph()->GetArena());
426     parallel_move.AddMove(
427         locations->InAt(0),
428         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
429         Primitive::kPrimNot,
430         nullptr);
431     parallel_move.AddMove(
432         locations->InAt(1),
433         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
434         Primitive::kPrimInt,
435         nullptr);
436     parallel_move.AddMove(
437         locations->InAt(2),
438         Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
439         Primitive::kPrimNot,
440         nullptr);
441     codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
442 
443     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
444     mips64_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
445     CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
446     RestoreLiveRegisters(codegen, locations);
447     __ Bc(GetExitLabel());
448   }
449 
450   const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS64"; }
451 
452  private:
453   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS64);
454 };
455 
456 // Slow path marking an object reference `ref` during a read
457 // barrier. The field `obj.field` in the object `obj` holding this
458 // reference does not get updated by this slow path after marking (see
459 // ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 below for that).
460 //
461 // This means that after the execution of this slow path, `ref` will
462 // always be up-to-date, but `obj.field` may not; i.e., after the
463 // flip, `ref` will be a to-space reference, but `obj.field` will
464 // probably still be a from-space reference (unless it gets updated by
465 // another thread, or if another thread installed another object
466 // reference (different from `ref`) in `obj.field`).
467 //
468 // If `entrypoint` is a valid location it is assumed to already be
469 // holding the entrypoint. The case where the entrypoint is passed in
470 // is for the GcRoot read barrier.
471 class ReadBarrierMarkSlowPathMIPS64 : public SlowPathCodeMIPS64 {
472  public:
473   ReadBarrierMarkSlowPathMIPS64(HInstruction* instruction,
474                                 Location ref,
475                                 Location entrypoint = Location::NoLocation())
476       : SlowPathCodeMIPS64(instruction), ref_(ref), entrypoint_(entrypoint) {
477     DCHECK(kEmitCompilerReadBarrier);
478   }
479 
480   const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS64"; }
481 
482   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
483     LocationSummary* locations = instruction_->GetLocations();
484     GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
485     DCHECK(locations->CanCall());
486     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
487     DCHECK(instruction_->IsInstanceFieldGet() ||
488            instruction_->IsStaticFieldGet() ||
489            instruction_->IsArrayGet() ||
490            instruction_->IsArraySet() ||
491            instruction_->IsLoadClass() ||
492            instruction_->IsLoadString() ||
493            instruction_->IsInstanceOf() ||
494            instruction_->IsCheckCast() ||
495            (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
496            (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
497         << "Unexpected instruction in read barrier marking slow path: "
498         << instruction_->DebugName();
499 
500     __ Bind(GetEntryLabel());
501     // No need to save live registers; it's taken care of by the
502     // entrypoint. Also, there is no need to update the stack mask,
503     // as this runtime call will not trigger a garbage collection.
504     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
505     DCHECK((V0 <= ref_reg && ref_reg <= T2) ||
506            (S2 <= ref_reg && ref_reg <= S7) ||
507            (ref_reg == S8)) << ref_reg;
508     // "Compact" slow path, saving two moves.
509     //
510     // Instead of using the standard runtime calling convention (input
511     // and output in A0 and V0 respectively):
512     //
513     //   A0 <- ref
514     //   V0 <- ReadBarrierMark(A0)
515     //   ref <- V0
516     //
517     // we just use rX (the register containing `ref`) as input and output
518     // of a dedicated entrypoint:
519     //
520     //   rX <- ReadBarrierMarkRegX(rX)
521     //
522     if (entrypoint_.IsValid()) {
523       mips64_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
524       DCHECK_EQ(entrypoint_.AsRegister<GpuRegister>(), T9);
525       __ Jalr(entrypoint_.AsRegister<GpuRegister>());
526       __ Nop();
527     } else {
528       int32_t entry_point_offset =
529           CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(ref_reg - 1);
530       // This runtime call does not require a stack map.
531       mips64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
532                                                           instruction_,
533                                                           this);
534     }
535     __ Bc(GetExitLabel());
536   }
537 
538  private:
539   // The location (register) of the marked object reference.
540   const Location ref_;
541 
542   // The location of the entrypoint if already loaded.
543   const Location entrypoint_;
544 
545   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathMIPS64);
546 };
547 
548 // Slow path marking an object reference `ref` during a read barrier,
549 // and if needed, atomically updating the field `obj.field` in the
550 // object `obj` holding this reference after marking (contrary to
551 // ReadBarrierMarkSlowPathMIPS64 above, which never tries to update
552 // `obj.field`).
553 //
554 // This means that after the execution of this slow path, both `ref`
555 // and `obj.field` will be up-to-date; i.e., after the flip, both will
556 // hold the same to-space reference (unless another thread installed
557 // another object reference (different from `ref`) in `obj.field`).
558 class ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 : public SlowPathCodeMIPS64 {
559  public:
560   ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(HInstruction* instruction,
561                                               Location ref,
562                                               GpuRegister obj,
563                                               Location field_offset,
564                                               GpuRegister temp1)
565       : SlowPathCodeMIPS64(instruction),
566         ref_(ref),
567         obj_(obj),
568         field_offset_(field_offset),
569         temp1_(temp1) {
570     DCHECK(kEmitCompilerReadBarrier);
571   }
572 
573   const char* GetDescription() const OVERRIDE {
574     return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS64";
575   }
576 
577   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
578     LocationSummary* locations = instruction_->GetLocations();
579     GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
580     DCHECK(locations->CanCall());
581     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
582     // This slow path is only used by the UnsafeCASObject intrinsic.
583     DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
584         << "Unexpected instruction in read barrier marking and field updating slow path: "
585         << instruction_->DebugName();
586     DCHECK(instruction_->GetLocations()->Intrinsified());
587     DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
588     DCHECK(field_offset_.IsRegister()) << field_offset_;
589 
590     __ Bind(GetEntryLabel());
591 
592     // Save the old reference.
593     // Note that we cannot use AT or TMP to save the old reference, as those
594     // are used by the code that follows, but we need the old reference after
595     // the call to the ReadBarrierMarkRegX entry point.
596     DCHECK_NE(temp1_, AT);
597     DCHECK_NE(temp1_, TMP);
598     __ Move(temp1_, ref_reg);
599 
600     // No need to save live registers; it's taken care of by the
601     // entrypoint. Also, there is no need to update the stack mask,
602     // as this runtime call will not trigger a garbage collection.
603     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
604     DCHECK((V0 <= ref_reg && ref_reg <= T2) ||
605            (S2 <= ref_reg && ref_reg <= S7) ||
606            (ref_reg == S8)) << ref_reg;
607     // "Compact" slow path, saving two moves.
608     //
609     // Instead of using the standard runtime calling convention (input
610     // and output in A0 and V0 respectively):
611     //
612     //   A0 <- ref
613     //   V0 <- ReadBarrierMark(A0)
614     //   ref <- V0
615     //
616     // we just use rX (the register containing `ref`) as input and output
617     // of a dedicated entrypoint:
618     //
619     //   rX <- ReadBarrierMarkRegX(rX)
620     //
621     int32_t entry_point_offset =
622         CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(ref_reg - 1);
623     // This runtime call does not require a stack map.
624     mips64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
625                                                         instruction_,
626                                                         this);
627 
628     // If the new reference is different from the old reference,
629     // update the field in the holder (`*(obj_ + field_offset_)`).
630     //
631     // Note that this field could also hold a different object, if
632     // another thread had concurrently changed it. In that case, the
633     // compare-and-set (CAS) loop below would abort, leaving the
634     // field as-is.
635     Mips64Label done;
636     __ Beqc(temp1_, ref_reg, &done);
637 
638     // Update the holder's field atomically.  This may fail if the
639     // mutator updates it before us, but that's OK.  This is achieved
640     // using a strong compare-and-set (CAS) operation with relaxed
641     // memory synchronization ordering, where the expected value is
642     // the old reference and the desired value is the new reference.
643 
644     // Convenience aliases.
645     GpuRegister base = obj_;
646     GpuRegister offset = field_offset_.AsRegister<GpuRegister>();
647     GpuRegister expected = temp1_;
648     GpuRegister value = ref_reg;
649     GpuRegister tmp_ptr = TMP;      // Pointer to actual memory.
650     GpuRegister tmp = AT;           // Value in memory.
651 
652     __ Daddu(tmp_ptr, base, offset);
653 
654     if (kPoisonHeapReferences) {
655       __ PoisonHeapReference(expected);
656       // Do not poison `value` if it is the same register as
657       // `expected`, which has just been poisoned.
658       if (value != expected) {
659         __ PoisonHeapReference(value);
660       }
661     }
662 
663     // do {
664     //   tmp = [r_ptr] - expected;
665     // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
666 
667     Mips64Label loop_head, exit_loop;
668     __ Bind(&loop_head);
669     __ Ll(tmp, tmp_ptr);
670     // The LL instruction sign-extends the 32-bit value, but
671     // 32-bit references must be zero-extended. Zero-extend `tmp`.
672     __ Dext(tmp, tmp, 0, 32);
673     __ Bnec(tmp, expected, &exit_loop);
674     __ Move(tmp, value);
675     __ Sc(tmp, tmp_ptr);
676     __ Beqzc(tmp, &loop_head);
677     __ Bind(&exit_loop);
678 
679     if (kPoisonHeapReferences) {
680       __ UnpoisonHeapReference(expected);
681       // Do not unpoison `value` if it is the same register as
682       // `expected`, which has just been unpoisoned.
683       if (value != expected) {
684         __ UnpoisonHeapReference(value);
685       }
686     }
687 
688     __ Bind(&done);
689     __ Bc(GetExitLabel());
690   }
691 
692  private:
693   // The location (register) of the marked object reference.
694   const Location ref_;
695   // The register containing the object holding the marked object reference field.
696   const GpuRegister obj_;
697   // The location of the offset of the marked reference field within `obj_`.
698   Location field_offset_;
699 
700   const GpuRegister temp1_;
701 
702   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathMIPS64);
703 };
704 
705 // Slow path generating a read barrier for a heap reference.
706 class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 {
707  public:
708   ReadBarrierForHeapReferenceSlowPathMIPS64(HInstruction* instruction,
709                                             Location out,
710                                             Location ref,
711                                             Location obj,
712                                             uint32_t offset,
713                                             Location index)
714       : SlowPathCodeMIPS64(instruction),
715         out_(out),
716         ref_(ref),
717         obj_(obj),
718         offset_(offset),
719         index_(index) {
720     DCHECK(kEmitCompilerReadBarrier);
721     // If `obj` is equal to `out` or `ref`, it means the initial object
722     // has been overwritten by (or after) the heap object reference load
723     // to be instrumented, e.g.:
724     //
725     //   __ LoadFromOffset(kLoadWord, out, out, offset);
726     //   codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
727     //
728     // In that case, we have lost the information about the original
729     // object, and the emitted read barrier cannot work properly.
730     DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
731     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
732   }
733 
734   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
735     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
736     LocationSummary* locations = instruction_->GetLocations();
737     Primitive::Type type = Primitive::kPrimNot;
738     GpuRegister reg_out = out_.AsRegister<GpuRegister>();
739     DCHECK(locations->CanCall());
740     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
741     DCHECK(instruction_->IsInstanceFieldGet() ||
742            instruction_->IsStaticFieldGet() ||
743            instruction_->IsArrayGet() ||
744            instruction_->IsInstanceOf() ||
745            instruction_->IsCheckCast() ||
746            (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
747         << "Unexpected instruction in read barrier for heap reference slow path: "
748         << instruction_->DebugName();
749 
750     __ Bind(GetEntryLabel());
751     SaveLiveRegisters(codegen, locations);
752 
753     // We may have to change the index's value, but as `index_` is a
754     // constant member (like other "inputs" of this slow path),
755     // introduce a copy of it, `index`.
756     Location index = index_;
757     if (index_.IsValid()) {
758       // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
759       if (instruction_->IsArrayGet()) {
760         // Compute the actual memory offset and store it in `index`.
761         GpuRegister index_reg = index_.AsRegister<GpuRegister>();
762         DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
763         if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
764           // We are about to change the value of `index_reg` (see the
765           // calls to art::mips64::Mips64Assembler::Sll and
766           // art::mips64::Mips64Assembler::Addiu32 below), but it has
767           // not been saved by the previous call to
768           // art::SlowPathCode::SaveLiveRegisters, as it is a
769           // callee-save register --
770           // art::SlowPathCode::SaveLiveRegisters does not consider
771           // callee-save registers, as it has been designed with the
772           // assumption that callee-save registers are supposed to be
773           // handled by the called function.  So, as a callee-save
774           // register, `index_reg` _would_ eventually be saved onto
775           // the stack, but it would be too late: we would have
776           // changed its value earlier.  Therefore, we manually save
777           // it here into another freely available register,
778           // `free_reg`, chosen of course among the caller-save
779           // registers (as a callee-save `free_reg` register would
780           // exhibit the same problem).
781           //
782           // Note we could have requested a temporary register from
783           // the register allocator instead; but we prefer not to, as
784           // this is a slow path, and we know we can find a
785           // caller-save register that is available.
786           GpuRegister free_reg = FindAvailableCallerSaveRegister(codegen);
787           __ Move(free_reg, index_reg);
788           index_reg = free_reg;
789           index = Location::RegisterLocation(index_reg);
790         } else {
791           // The initial register stored in `index_` has already been
792           // saved in the call to art::SlowPathCode::SaveLiveRegisters
793           // (as it is not a callee-save register), so we can freely
794           // use it.
795         }
796         // Shifting the index value contained in `index_reg` by the scale
797         // factor (2) cannot overflow in practice, as the runtime is
798         // unable to allocate object arrays with a size larger than
799         // 2^26 - 1 (that is, 2^28 - 4 bytes).
800         __ Sll(index_reg, index_reg, TIMES_4);
801         static_assert(
802             sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
803             "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
804         __ Addiu32(index_reg, index_reg, offset_);
805       } else {
806         // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile
807         // intrinsics, `index_` is not shifted by a scale factor of 2
808         // (as in the case of ArrayGet), as it is actually an offset
809         // to an object field within an object.
810         DCHECK(instruction_->IsInvoke()) << instruction_->DebugName();
811         DCHECK(instruction_->GetLocations()->Intrinsified());
812         DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
813                (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
814             << instruction_->AsInvoke()->GetIntrinsic();
815         DCHECK_EQ(offset_, 0U);
816         DCHECK(index_.IsRegister());
817       }
818     }
819 
820     // We're moving two or three locations to locations that could
821     // overlap, so we need a parallel move resolver.
822     InvokeRuntimeCallingConvention calling_convention;
823     HParallelMove parallel_move(codegen->GetGraph()->GetArena());
824     parallel_move.AddMove(ref_,
825                           Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
826                           Primitive::kPrimNot,
827                           nullptr);
828     parallel_move.AddMove(obj_,
829                           Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
830                           Primitive::kPrimNot,
831                           nullptr);
832     if (index.IsValid()) {
833       parallel_move.AddMove(index,
834                             Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
835                             Primitive::kPrimInt,
836                             nullptr);
837       codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
838     } else {
839       codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
840       __ LoadConst32(calling_convention.GetRegisterAt(2), offset_);
841     }
842     mips64_codegen->InvokeRuntime(kQuickReadBarrierSlow,
843                                   instruction_,
844                                   instruction_->GetDexPc(),
845                                   this);
846     CheckEntrypointTypes<
847         kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
848     mips64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type);
849 
850     RestoreLiveRegisters(codegen, locations);
851     __ Bc(GetExitLabel());
852   }
853 
854   const char* GetDescription() const OVERRIDE {
855     return "ReadBarrierForHeapReferenceSlowPathMIPS64";
856   }
857 
858  private:
859   GpuRegister FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
860     size_t ref = static_cast<int>(ref_.AsRegister<GpuRegister>());
861     size_t obj = static_cast<int>(obj_.AsRegister<GpuRegister>());
862     for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
863       if (i != ref &&
864           i != obj &&
865           !codegen->IsCoreCalleeSaveRegister(i) &&
866           !codegen->IsBlockedCoreRegister(i)) {
867         return static_cast<GpuRegister>(i);
868       }
869     }
870     // We shall never fail to find a free caller-save register, as
871     // there are more than two core caller-save registers on MIPS64
872     // (meaning it is possible to find one which is different from
873     // `ref` and `obj`).
874     DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
875     LOG(FATAL) << "Could not find a free caller-save register";
876     UNREACHABLE();
877   }
878 
879   const Location out_;
880   const Location ref_;
881   const Location obj_;
882   const uint32_t offset_;
883   // An additional location containing an index to an array.
884   // Only used for HArrayGet and the UnsafeGetObject &
885   // UnsafeGetObjectVolatile intrinsics.
886   const Location index_;
887 
888   DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathMIPS64);
889 };
890 
891 // Slow path generating a read barrier for a GC root.
892 class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 {
893  public:
894   ReadBarrierForRootSlowPathMIPS64(HInstruction* instruction, Location out, Location root)
895       : SlowPathCodeMIPS64(instruction), out_(out), root_(root) {
896     DCHECK(kEmitCompilerReadBarrier);
897   }
898 
899   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
900     LocationSummary* locations = instruction_->GetLocations();
901     Primitive::Type type = Primitive::kPrimNot;
902     GpuRegister reg_out = out_.AsRegister<GpuRegister>();
903     DCHECK(locations->CanCall());
904     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
905     DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
906         << "Unexpected instruction in read barrier for GC root slow path: "
907         << instruction_->DebugName();
908 
909     __ Bind(GetEntryLabel());
910     SaveLiveRegisters(codegen, locations);
911 
912     InvokeRuntimeCallingConvention calling_convention;
913     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
914     mips64_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
915                                  root_,
916                                  Primitive::kPrimNot);
917     mips64_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
918                                   instruction_,
919                                   instruction_->GetDexPc(),
920                                   this);
921     CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
922     mips64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type);
923 
924     RestoreLiveRegisters(codegen, locations);
925     __ Bc(GetExitLabel());
926   }
927 
928   const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS64"; }
929 
930  private:
931   const Location out_;
932   const Location root_;
933 
934   DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathMIPS64);
935 };
936 
937 CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
938                                          const Mips64InstructionSetFeatures& isa_features,
939                                          const CompilerOptions& compiler_options,
940                                          OptimizingCompilerStats* stats)
941     : CodeGenerator(graph,
942                     kNumberOfGpuRegisters,
943                     kNumberOfFpuRegisters,
944                     /* number_of_register_pairs */ 0,
945                     ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
946                                         arraysize(kCoreCalleeSaves)),
947                     ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
948                                         arraysize(kFpuCalleeSaves)),
949                     compiler_options,
950                     stats),
951       block_labels_(nullptr),
952       location_builder_(graph, this),
953       instruction_visitor_(graph, this),
954       move_resolver_(graph->GetArena(), this),
955       assembler_(graph->GetArena()),
956       isa_features_(isa_features),
957       uint32_literals_(std::less<uint32_t>(),
958                        graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
959       uint64_literals_(std::less<uint64_t>(),
960                        graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
961       pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
962       boot_image_string_patches_(StringReferenceValueComparator(),
963                                  graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
964       pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
965       boot_image_type_patches_(TypeReferenceValueComparator(),
966                                graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
967       pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
968       type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
969       jit_string_patches_(StringReferenceValueComparator(),
970                           graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
971       jit_class_patches_(TypeReferenceValueComparator(),
972                          graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
973   // Save RA (containing the return address) to mimic Quick.
974   AddAllocatedRegister(Location::RegisterLocation(RA));
975 }
976 
977 #undef __
978 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
979 #define __ down_cast<Mips64Assembler*>(GetAssembler())->  // NOLINT
980 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
981 
982 void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
983   // Ensure that we fix up branches.
984   __ FinalizeCode();
985 
986   // Adjust native pc offsets in stack maps.
987   for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
988     uint32_t old_position =
989         stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
990     uint32_t new_position = __ GetAdjustedPosition(old_position);
991     DCHECK_GE(new_position, old_position);
992     stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
993   }
994 
995   // Adjust pc offsets for the disassembly information.
996   if (disasm_info_ != nullptr) {
997     GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
998     frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
999     frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
1000     for (auto& it : *disasm_info_->GetInstructionIntervals()) {
1001       it.second.start = __ GetAdjustedPosition(it.second.start);
1002       it.second.end = __ GetAdjustedPosition(it.second.end);
1003     }
1004     for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
1005       it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
1006       it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
1007     }
1008   }
1009 
1010   CodeGenerator::Finalize(allocator);
1011 }
1012 
1013 Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
1014   return codegen_->GetAssembler();
1015 }
1016 
1017 void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
1018   MoveOperands* move = moves_[index];
1019   codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
1020 }
1021 
1022 void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
1023   MoveOperands* move = moves_[index];
1024   codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
1025 }
1026 
1027 void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
1028   // Pop reg
1029   __ Ld(GpuRegister(reg), SP, 0);
1030   __ DecreaseFrameSize(kMips64DoublewordSize);
1031 }
1032 
1033 void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
1034   // Push reg
1035   __ IncreaseFrameSize(kMips64DoublewordSize);
1036   __ Sd(GpuRegister(reg), SP, 0);
1037 }
1038 
1039 void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
1040   LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
1041   StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
1042   // Allocate a scratch register other than TMP, if available.
1043   // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
1044   // automatically unspilled when the scratch scope object is destroyed).
1045   ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
1046   // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
1047   int stack_offset = ensure_scratch.IsSpilled() ? kMips64DoublewordSize : 0;
1048   __ LoadFromOffset(load_type,
1049                     GpuRegister(ensure_scratch.GetRegister()),
1050                     SP,
1051                     index1 + stack_offset);
1052   __ LoadFromOffset(load_type,
1053                     TMP,
1054                     SP,
1055                     index2 + stack_offset);
1056   __ StoreToOffset(store_type,
1057                    GpuRegister(ensure_scratch.GetRegister()),
1058                    SP,
1059                    index2 + stack_offset);
1060   __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
1061 }
1062 
1063 static dwarf::Reg DWARFReg(GpuRegister reg) {
1064   return dwarf::Reg::Mips64Core(static_cast<int>(reg));
1065 }
1066 
1067 static dwarf::Reg DWARFReg(FpuRegister reg) {
1068   return dwarf::Reg::Mips64Fp(static_cast<int>(reg));
1069 }
1070 
1071 void CodeGeneratorMIPS64::GenerateFrameEntry() {
1072   __ Bind(&frame_entry_label_);
1073 
1074   bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
1075 
1076   if (do_overflow_check) {
1077     __ LoadFromOffset(kLoadWord,
1078                       ZERO,
1079                       SP,
1080                       -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
1081     RecordPcInfo(nullptr, 0);
1082   }
1083 
1084   if (HasEmptyFrame()) {
1085     return;
1086   }
1087 
1088   // Make sure the frame size isn't unreasonably large.
1089   if (GetFrameSize() > GetStackOverflowReservedBytes(kMips64)) {
1090     LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips64) << " bytes";
1091   }
1092 
1093   // Spill callee-saved registers.
1094 
1095   uint32_t ofs = GetFrameSize();
1096   __ IncreaseFrameSize(ofs);
1097 
1098   for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
1099     GpuRegister reg = kCoreCalleeSaves[i];
1100     if (allocated_registers_.ContainsCoreRegister(reg)) {
1101       ofs -= kMips64DoublewordSize;
1102       __ StoreToOffset(kStoreDoubleword, reg, SP, ofs);
1103       __ cfi().RelOffset(DWARFReg(reg), ofs);
1104     }
1105   }
1106 
1107   for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
1108     FpuRegister reg = kFpuCalleeSaves[i];
1109     if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
1110       ofs -= kMips64DoublewordSize;
1111       __ StoreFpuToOffset(kStoreDoubleword, reg, SP, ofs);
1112       __ cfi().RelOffset(DWARFReg(reg), ofs);
1113     }
1114   }
1115 
1116   // Save the current method if we need it. Note that we do not
1117   // do this in HCurrentMethod, as the instruction might have been removed
1118   // in the SSA graph.
1119   if (RequiresCurrentMethod()) {
1120     __ StoreToOffset(kStoreDoubleword, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
1121   }
1122 
1123   if (GetGraph()->HasShouldDeoptimizeFlag()) {
1124     // Initialize should_deoptimize flag to 0.
1125     __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
1126   }
1127 }
1128 
1129 void CodeGeneratorMIPS64::GenerateFrameExit() {
1130   __ cfi().RememberState();
1131 
1132   if (!HasEmptyFrame()) {
1133     // Restore callee-saved registers.
1134 
1135     // For better instruction scheduling restore RA before other registers.
1136     uint32_t ofs = GetFrameSize();
1137     for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
1138       GpuRegister reg = kCoreCalleeSaves[i];
1139       if (allocated_registers_.ContainsCoreRegister(reg)) {
1140         ofs -= kMips64DoublewordSize;
1141         __ LoadFromOffset(kLoadDoubleword, reg, SP, ofs);
1142         __ cfi().Restore(DWARFReg(reg));
1143       }
1144     }
1145 
1146     for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
1147       FpuRegister reg = kFpuCalleeSaves[i];
1148       if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
1149         ofs -= kMips64DoublewordSize;
1150         __ LoadFpuFromOffset(kLoadDoubleword, reg, SP, ofs);
1151         __ cfi().Restore(DWARFReg(reg));
1152       }
1153     }
1154 
1155     __ DecreaseFrameSize(GetFrameSize());
1156   }
1157 
1158   __ Jic(RA, 0);
1159 
1160   __ cfi().RestoreState();
1161   __ cfi().DefCFAOffset(GetFrameSize());
1162 }
1163 
1164 void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
1165   __ Bind(GetLabelOf(block));
1166 }
1167 
1168 void CodeGeneratorMIPS64::MoveLocation(Location destination,
1169                                        Location source,
1170                                        Primitive::Type dst_type) {
1171   if (source.Equals(destination)) {
1172     return;
1173   }
1174 
1175   // A valid move can always be inferred from the destination and source
1176   // locations. When moving from and to a register, the argument type can be
1177   // used to generate 32bit instead of 64bit moves.
1178   bool unspecified_type = (dst_type == Primitive::kPrimVoid);
1179   DCHECK_EQ(unspecified_type, false);
1180 
1181   if (destination.IsRegister() || destination.IsFpuRegister()) {
1182     if (unspecified_type) {
1183       HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
1184       if (source.IsStackSlot() ||
1185           (src_cst != nullptr && (src_cst->IsIntConstant()
1186                                   || src_cst->IsFloatConstant()
1187                                   || src_cst->IsNullConstant()))) {
1188         // For stack slots and 32bit constants, a 64bit type is appropriate.
1189         dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
1190       } else {
1191         // If the source is a double stack slot or a 64bit constant, a 64bit
1192         // type is appropriate. Else the source is a register, and since the
1193         // type has not been specified, we choose a 64bit type to force a 64bit
1194         // move.
1195         dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
1196       }
1197     }
1198     DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
1199            (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
1200     if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
1201       // Move to GPR/FPR from stack
1202       LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
1203       if (Primitive::IsFloatingPointType(dst_type)) {
1204         __ LoadFpuFromOffset(load_type,
1205                              destination.AsFpuRegister<FpuRegister>(),
1206                              SP,
1207                              source.GetStackIndex());
1208       } else {
1209         // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
1210         __ LoadFromOffset(load_type,
1211                           destination.AsRegister<GpuRegister>(),
1212                           SP,
1213                           source.GetStackIndex());
1214       }
1215     } else if (source.IsConstant()) {
1216       // Move to GPR/FPR from constant
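      // Materialize the constant in a GPR first (reusing ZERO when the bit
      // pattern is all zeroes), then transfer it with mtc1/dmtc1 if the
      // destination is an FPU register.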
1217       GpuRegister gpr = AT;
1218       if (!Primitive::IsFloatingPointType(dst_type)) {
1219         gpr = destination.AsRegister<GpuRegister>();
1220       }
1221       if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
1222         int32_t value = GetInt32ValueOf(source.GetConstant()->AsConstant());
1223         if (Primitive::IsFloatingPointType(dst_type) && value == 0) {
1224           gpr = ZERO;
1225         } else {
1226           __ LoadConst32(gpr, value);
1227         }
1228       } else {
1229         int64_t value = GetInt64ValueOf(source.GetConstant()->AsConstant());
1230         if (Primitive::IsFloatingPointType(dst_type) && value == 0) {
1231           gpr = ZERO;
1232         } else {
1233           __ LoadConst64(gpr, value);
1234         }
1235       }
1236       if (dst_type == Primitive::kPrimFloat) {
1237         __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
1238       } else if (dst_type == Primitive::kPrimDouble) {
1239         __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
1240       }
1241     } else if (source.IsRegister()) {
1242       if (destination.IsRegister()) {
1243         // Move to GPR from GPR
1244         __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
1245       } else {
1246         DCHECK(destination.IsFpuRegister());
1247         if (Primitive::Is64BitType(dst_type)) {
1248           __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
1249         } else {
1250           __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
1251         }
1252       }
1253     } else if (source.IsFpuRegister()) {
1254       if (destination.IsFpuRegister()) {
1255         // Move to FPR from FPR
1256         if (dst_type == Primitive::kPrimFloat) {
1257           __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
1258         } else {
1259           DCHECK_EQ(dst_type, Primitive::kPrimDouble);
1260           __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
1261         }
1262       } else {
1263         DCHECK(destination.IsRegister());
1264         if (Primitive::Is64BitType(dst_type)) {
1265           __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
1266         } else {
1267           __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
1268         }
1269       }
1270     }
1271   } else {  // The destination is not a register. It must be a stack slot.
1272     DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
1273     if (source.IsRegister() || source.IsFpuRegister()) {
1274       if (unspecified_type) {
1275         if (source.IsRegister()) {
1276           dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
1277         } else {
1278           dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
1279         }
1280       }
1281       DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
1282              (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
1283       // Move to stack from GPR/FPR
1284       StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
1285       if (source.IsRegister()) {
1286         __ StoreToOffset(store_type,
1287                          source.AsRegister<GpuRegister>(),
1288                          SP,
1289                          destination.GetStackIndex());
1290       } else {
1291         __ StoreFpuToOffset(store_type,
1292                             source.AsFpuRegister<FpuRegister>(),
1293                             SP,
1294                             destination.GetStackIndex());
1295       }
1296     } else if (source.IsConstant()) {
1297       // Move to stack from constant
1298       HConstant* src_cst = source.GetConstant();
1299       StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
1300       GpuRegister gpr = ZERO;
1301       if (destination.IsStackSlot()) {
1302         int32_t value = GetInt32ValueOf(src_cst->AsConstant());
1303         if (value != 0) {
1304           gpr = TMP;
1305           __ LoadConst32(gpr, value);
1306         }
1307       } else {
1308         DCHECK(destination.IsDoubleStackSlot());
1309         int64_t value = GetInt64ValueOf(src_cst->AsConstant());
1310         if (value != 0) {
1311           gpr = TMP;
1312           __ LoadConst64(gpr, value);
1313         }
1314       }
1315       __ StoreToOffset(store_type, gpr, SP, destination.GetStackIndex());
1316     } else {
1317       DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
1318       DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
1319       // Move to stack from stack
1320       if (destination.IsStackSlot()) {
1321         __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
1322         __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
1323       } else {
1324         __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
1325         __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
1326       }
1327     }
1328   }
1329 }
1330 
1331 void CodeGeneratorMIPS64::SwapLocations(Location loc1, Location loc2, Primitive::Type type) {
1332   DCHECK(!loc1.IsConstant());
1333   DCHECK(!loc2.IsConstant());
1334 
1335   if (loc1.Equals(loc2)) {
1336     return;
1337   }
1338 
1339   bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
1340   bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
1341   bool is_fp_reg1 = loc1.IsFpuRegister();
1342   bool is_fp_reg2 = loc2.IsFpuRegister();
1343 
1344   if (loc2.IsRegister() && loc1.IsRegister()) {
1345     // Swap 2 GPRs
1346     GpuRegister r1 = loc1.AsRegister<GpuRegister>();
1347     GpuRegister r2 = loc2.AsRegister<GpuRegister>();
1348     __ Move(TMP, r2);
1349     __ Move(r2, r1);
1350     __ Move(r1, TMP);
1351   } else if (is_fp_reg2 && is_fp_reg1) {
1352     // Swap 2 FPRs
1353     FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
1354     FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
1355     if (type == Primitive::kPrimFloat) {
1356       __ MovS(FTMP, r1);
1357       __ MovS(r1, r2);
1358       __ MovS(r2, FTMP);
1359     } else {
1360       DCHECK_EQ(type, Primitive::kPrimDouble);
1361       __ MovD(FTMP, r1);
1362       __ MovD(r1, r2);
1363       __ MovD(r2, FTMP);
1364     }
1365   } else if (is_slot1 != is_slot2) {
1366     // Swap GPR/FPR and stack slot
1367     Location reg_loc = is_slot1 ? loc2 : loc1;
1368     Location mem_loc = is_slot1 ? loc1 : loc2;
1369     LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
1370     StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
1371     // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
1372     __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
1373     if (reg_loc.IsFpuRegister()) {
1374       __ StoreFpuToOffset(store_type,
1375                           reg_loc.AsFpuRegister<FpuRegister>(),
1376                           SP,
1377                           mem_loc.GetStackIndex());
1378       if (mem_loc.IsStackSlot()) {
1379         __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
1380       } else {
1381         DCHECK(mem_loc.IsDoubleStackSlot());
1382         __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
1383       }
1384     } else {
1385       __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
1386       __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
1387     }
1388   } else if (is_slot1 && is_slot2) {
1389     move_resolver_.Exchange(loc1.GetStackIndex(),
1390                             loc2.GetStackIndex(),
1391                             loc1.IsDoubleStackSlot());
1392   } else {
1393     LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
1394   }
1395 }
1396 
1397 void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
1398   DCHECK(location.IsRegister());
1399   __ LoadConst32(location.AsRegister<GpuRegister>(), value);
1400 }
1401 
1402 void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
1403   if (location.IsRegister()) {
1404     locations->AddTemp(location);
1405   } else {
1406     UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
1407   }
1408 }
1409 
1410 void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object,
1411                                      GpuRegister value,
1412                                      bool value_can_be_null) {
1413   Mips64Label done;
1414   GpuRegister card = AT;
1415   GpuRegister temp = TMP;
1416   if (value_can_be_null) {
1417     __ Beqzc(value, &done);
1418   }
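  // The card table base loaded from the Thread doubles as the dirty-card
  // value: storing its low byte at card_base + (object >> kCardShift) marks
  // the card covering `object` as dirty.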
1419   __ LoadFromOffset(kLoadDoubleword,
1420                     card,
1421                     TR,
1422                     Thread::CardTableOffset<kMips64PointerSize>().Int32Value());
1423   __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
1424   __ Daddu(temp, card, temp);
1425   __ Sb(card, temp, 0);
1426   if (value_can_be_null) {
1427     __ Bind(&done);
1428   }
1429 }
1430 
1431 template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
1432 inline void CodeGeneratorMIPS64::EmitPcRelativeLinkerPatches(
1433     const ArenaDeque<PcRelativePatchInfo>& infos,
1434     ArenaVector<LinkerPatch>* linker_patches) {
1435   for (const PcRelativePatchInfo& info : infos) {
1436     const DexFile& dex_file = info.target_dex_file;
1437     size_t offset_or_index = info.offset_or_index;
1438     DCHECK(info.pc_rel_label.IsBound());
1439     uint32_t pc_rel_offset = __ GetLabelLocation(&info.pc_rel_label);
1440     linker_patches->push_back(Factory(pc_rel_offset, &dex_file, pc_rel_offset, offset_or_index));
1441   }
1442 }
1443 
1444 void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
1445   DCHECK(linker_patches->empty());
1446   size_t size =
1447       pc_relative_dex_cache_patches_.size() +
1448       pc_relative_string_patches_.size() +
1449       pc_relative_type_patches_.size() +
1450       type_bss_entry_patches_.size() +
1451       boot_image_string_patches_.size() +
1452       boot_image_type_patches_.size();
1453   linker_patches->reserve(size);
1454   EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
1455                                                                linker_patches);
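  // When not compiling the boot image, strings are loaded through .bss
  // entries; for the boot image itself, direct PC-relative string and type
  // references are emitted instead.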
1456   if (!GetCompilerOptions().IsBootImage()) {
1457     DCHECK(pc_relative_type_patches_.empty());
1458     EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
1459                                                                   linker_patches);
1460   } else {
1461     EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
1462                                                                 linker_patches);
1463     EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
1464                                                                   linker_patches);
1465   }
1466   EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
1467                                                               linker_patches);
1468   for (const auto& entry : boot_image_string_patches_) {
1469     const StringReference& target_string = entry.first;
1470     Literal* literal = entry.second;
1471     DCHECK(literal->GetLabel()->IsBound());
1472     uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
1473     linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
1474                                                        target_string.dex_file,
1475                                                        target_string.string_index.index_));
1476   }
1477   for (const auto& entry : boot_image_type_patches_) {
1478     const TypeReference& target_type = entry.first;
1479     Literal* literal = entry.second;
1480     DCHECK(literal->GetLabel()->IsBound());
1481     uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
1482     linker_patches->push_back(LinkerPatch::TypePatch(literal_offset,
1483                                                      target_type.dex_file,
1484                                                      target_type.type_index.index_));
1485   }
1486   DCHECK_EQ(size, linker_patches->size());
1487 }
1488 
1489 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeStringPatch(
1490     const DexFile& dex_file, dex::StringIndex string_index) {
1491   return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
1492 }
1493 
1494 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeTypePatch(
1495     const DexFile& dex_file, dex::TypeIndex type_index) {
1496   return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
1497 }
1498 
1499 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewTypeBssEntryPatch(
1500     const DexFile& dex_file, dex::TypeIndex type_index) {
1501   return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
1502 }
1503 
1504 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeDexCacheArrayPatch(
1505     const DexFile& dex_file, uint32_t element_offset) {
1506   return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
1507 }
1508 
1509 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativePatch(
1510     const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
1511   patches->emplace_back(dex_file, offset_or_index);
1512   return &patches->back();
1513 }
1514 
1515 Literal* CodeGeneratorMIPS64::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
1516   return map->GetOrCreate(
1517       value,
1518       [this, value]() { return __ NewLiteral<uint32_t>(value); });
1519 }
1520 
1521 Literal* CodeGeneratorMIPS64::DeduplicateUint64Literal(uint64_t value) {
1522   return uint64_literals_.GetOrCreate(
1523       value,
1524       [this, value]() { return __ NewLiteral<uint64_t>(value); });
1525 }
1526 
1527 Literal* CodeGeneratorMIPS64::DeduplicateMethodLiteral(MethodReference target_method,
1528                                                        MethodToLiteralMap* map) {
1529   return map->GetOrCreate(
1530       target_method,
1531       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
1532 }
1533 
1534 Literal* CodeGeneratorMIPS64::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
1535                                                                 dex::StringIndex string_index) {
1536   return boot_image_string_patches_.GetOrCreate(
1537       StringReference(&dex_file, string_index),
1538       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
1539 }
1540 
1541 Literal* CodeGeneratorMIPS64::DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
1542                                                               dex::TypeIndex type_index) {
1543   return boot_image_type_patches_.GetOrCreate(
1544       TypeReference(&dex_file, type_index),
1545       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
1546 }
1547 
1548 Literal* CodeGeneratorMIPS64::DeduplicateBootImageAddressLiteral(uint64_t address) {
1549   return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
1550 }
1551 
1552 void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info,
1553                                                                GpuRegister out) {
1554   __ Bind(&info->pc_rel_label);
1555   // Add the high half of a 32-bit offset to PC.
1556   __ Auipc(out, /* placeholder */ 0x1234);
1557   // The immediately following instruction will add the sign-extended low half of the 32-bit
1558   // offset to `out` (e.g. ld, jialc, daddiu).
1559 }
1560 
1561 Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
1562                                                           dex::StringIndex string_index,
1563                                                           Handle<mirror::String> handle) {
1564   jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
1565                               reinterpret_cast64<uint64_t>(handle.GetReference()));
1566   return jit_string_patches_.GetOrCreate(
1567       StringReference(&dex_file, string_index),
1568       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
1569 }
1570 
1571 Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
1572                                                          dex::TypeIndex type_index,
1573                                                          Handle<mirror::Class> handle) {
1574   jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
1575                              reinterpret_cast64<uint64_t>(handle.GetReference()));
1576   return jit_class_patches_.GetOrCreate(
1577       TypeReference(&dex_file, type_index),
1578       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
1579 }
1580 
1581 void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
1582                                           const uint8_t* roots_data,
1583                                           const Literal* literal,
1584                                           uint64_t index_in_table) const {
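  // Rewrite the 32-bit literal emitted for this root so that it holds the
  // address of the corresponding GcRoot slot in the JIT roots table.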
1585   uint32_t literal_offset = GetAssembler().GetLabelLocation(literal->GetLabel());
1586   uintptr_t address =
1587       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
1588   reinterpret_cast<uint32_t*>(code + literal_offset)[0] = dchecked_integral_cast<uint32_t>(address);
1589 }
1590 
1591 void CodeGeneratorMIPS64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
1592   for (const auto& entry : jit_string_patches_) {
1593     const auto& it = jit_string_roots_.find(entry.first);
1594     DCHECK(it != jit_string_roots_.end());
1595     PatchJitRootUse(code, roots_data, entry.second, it->second);
1596   }
1597   for (const auto& entry : jit_class_patches_) {
1598     const auto& it = jit_class_roots_.find(entry.first);
1599     DCHECK(it != jit_class_roots_.end());
1600     PatchJitRootUse(code, roots_data, entry.second, it->second);
1601   }
1602 }
1603 
1604 void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
1605   // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
1606   blocked_core_registers_[ZERO] = true;
1607   blocked_core_registers_[K0] = true;
1608   blocked_core_registers_[K1] = true;
1609   blocked_core_registers_[GP] = true;
1610   blocked_core_registers_[SP] = true;
1611   blocked_core_registers_[RA] = true;
1612 
1613   // AT, TMP(T8) and TMP2(T3) are used as temporary/scratch
1614   // registers (similar to how AT is used by MIPS assemblers).
1615   blocked_core_registers_[AT] = true;
1616   blocked_core_registers_[TMP] = true;
1617   blocked_core_registers_[TMP2] = true;
1618   blocked_fpu_registers_[FTMP] = true;
1619 
1620   // Reserve suspend and thread registers.
1621   blocked_core_registers_[S0] = true;
1622   blocked_core_registers_[TR] = true;
1623 
1624   // Reserve T9 for function calls
1625   blocked_core_registers_[T9] = true;
1626 
1627   if (GetGraph()->IsDebuggable()) {
1628     // Stubs do not save callee-save floating point registers. If the graph
1629     // is debuggable, we need to deal with these registers differently. For
1630     // now, just block them.
1631     for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
1632       blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
1633     }
1634   }
1635 }
1636 
1637 size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
1638   __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
1639   return kMips64DoublewordSize;
1640 }
1641 
1642 size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
1643   __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
1644   return kMips64DoublewordSize;
1645 }
1646 
1647 size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
1648   __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
1649   return kMips64DoublewordSize;
1650 }
1651 
1652 size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
1653   __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
1654   return kMips64DoublewordSize;
1655 }
1656 
1657 void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
1658   stream << GpuRegister(reg);
1659 }
1660 
1661 void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
1662   stream << FpuRegister(reg);
1663 }
1664 
1665 void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
1666                                         HInstruction* instruction,
1667                                         uint32_t dex_pc,
1668                                         SlowPathCode* slow_path) {
1669   ValidateInvokeRuntime(entrypoint, instruction, slow_path);
1670   GenerateInvokeRuntime(GetThreadOffset<kMips64PointerSize>(entrypoint).Int32Value());
1671   if (EntrypointRequiresStackMap(entrypoint)) {
1672     RecordPcInfo(instruction, dex_pc, slow_path);
1673   }
1674 }
1675 
1676 void CodeGeneratorMIPS64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
1677                                                               HInstruction* instruction,
1678                                                               SlowPathCode* slow_path) {
1679   ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
1680   GenerateInvokeRuntime(entry_point_offset);
1681 }
1682 
1683 void CodeGeneratorMIPS64::GenerateInvokeRuntime(int32_t entry_point_offset) {
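  // Load the entrypoint from the Thread object and call it through T9, the
  // standard MIPS register for indirect calls; the NOP fills the branch
  // delay slot of JALR.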
1684   __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
1685   __ Jalr(T9);
1686   __ Nop();
1687 }
1688 
1689 void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
1690                                                                       GpuRegister class_reg) {
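  // Take the slow path if the class status is below kStatusInitialized.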
1691   __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
1692   __ LoadConst32(AT, mirror::Class::kStatusInitialized);
1693   __ Bltc(TMP, AT, slow_path->GetEntryLabel());
1694   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
1695   __ Sync(0);
1696   __ Bind(slow_path->GetExitLabel());
1697 }
1698 
1699 void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
1700   __ Sync(0);  // only stype 0 is supported
1701 }
1702 
1703 void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
1704                                                           HBasicBlock* successor) {
1705   SuspendCheckSlowPathMIPS64* slow_path =
1706     new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
1707   codegen_->AddSlowPath(slow_path);
1708 
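  // A non-zero value in the thread flags indicates a pending suspend or
  // checkpoint request.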
1709   __ LoadFromOffset(kLoadUnsignedHalfword,
1710                     TMP,
1711                     TR,
1712                     Thread::ThreadFlagsOffset<kMips64PointerSize>().Int32Value());
1713   if (successor == nullptr) {
1714     __ Bnezc(TMP, slow_path->GetEntryLabel());
1715     __ Bind(slow_path->GetReturnLabel());
1716   } else {
1717     __ Beqzc(TMP, codegen_->GetLabelOf(successor));
1718     __ Bc(slow_path->GetEntryLabel());
1719     // slow_path will return to GetLabelOf(successor).
1720   }
1721 }
1722 
1723 InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
1724                                                                CodeGeneratorMIPS64* codegen)
1725       : InstructionCodeGenerator(graph, codegen),
1726         assembler_(codegen->GetAssembler()),
1727         codegen_(codegen) {}
1728 
1729 void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
1730   DCHECK_EQ(instruction->InputCount(), 2U);
1731   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1732   Primitive::Type type = instruction->GetResultType();
1733   switch (type) {
1734     case Primitive::kPrimInt:
1735     case Primitive::kPrimLong: {
1736       locations->SetInAt(0, Location::RequiresRegister());
1737       HInstruction* right = instruction->InputAt(1);
1738       bool can_use_imm = false;
1739       if (right->IsConstant()) {
1740         int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
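        // ANDI/ORI/XORI take a 16-bit zero-extended immediate, while
        // (D)ADDIU takes a 16-bit sign-extended one; a subtraction is
        // emitted as an addition of the negated constant.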
1741         if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
1742           can_use_imm = IsUint<16>(imm);
1743         } else if (instruction->IsAdd()) {
1744           can_use_imm = IsInt<16>(imm);
1745         } else {
1746           DCHECK(instruction->IsSub());
1747           can_use_imm = IsInt<16>(-imm);
1748         }
1749       }
1750       if (can_use_imm)
1751         locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
1752       else
1753         locations->SetInAt(1, Location::RequiresRegister());
1754       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1755       }
1756       break;
1757 
1758     case Primitive::kPrimFloat:
1759     case Primitive::kPrimDouble:
1760       locations->SetInAt(0, Location::RequiresFpuRegister());
1761       locations->SetInAt(1, Location::RequiresFpuRegister());
1762       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1763       break;
1764 
1765     default:
1766       LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
1767   }
1768 }
1769 
1770 void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
1771   Primitive::Type type = instruction->GetType();
1772   LocationSummary* locations = instruction->GetLocations();
1773 
1774   switch (type) {
1775     case Primitive::kPrimInt:
1776     case Primitive::kPrimLong: {
1777       GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1778       GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1779       Location rhs_location = locations->InAt(1);
1780 
1781       GpuRegister rhs_reg = ZERO;
1782       int64_t rhs_imm = 0;
1783       bool use_imm = rhs_location.IsConstant();
1784       if (use_imm) {
1785         rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
1786       } else {
1787         rhs_reg = rhs_location.AsRegister<GpuRegister>();
1788       }
1789 
1790       if (instruction->IsAnd()) {
1791         if (use_imm)
1792           __ Andi(dst, lhs, rhs_imm);
1793         else
1794           __ And(dst, lhs, rhs_reg);
1795       } else if (instruction->IsOr()) {
1796         if (use_imm)
1797           __ Ori(dst, lhs, rhs_imm);
1798         else
1799           __ Or(dst, lhs, rhs_reg);
1800       } else if (instruction->IsXor()) {
1801         if (use_imm)
1802           __ Xori(dst, lhs, rhs_imm);
1803         else
1804           __ Xor(dst, lhs, rhs_reg);
1805       } else if (instruction->IsAdd()) {
1806         if (type == Primitive::kPrimInt) {
1807           if (use_imm)
1808             __ Addiu(dst, lhs, rhs_imm);
1809           else
1810             __ Addu(dst, lhs, rhs_reg);
1811         } else {
1812           if (use_imm)
1813             __ Daddiu(dst, lhs, rhs_imm);
1814           else
1815             __ Daddu(dst, lhs, rhs_reg);
1816         }
1817       } else {
1818         DCHECK(instruction->IsSub());
1819         if (type == Primitive::kPrimInt) {
1820           if (use_imm)
1821             __ Addiu(dst, lhs, -rhs_imm);
1822           else
1823             __ Subu(dst, lhs, rhs_reg);
1824         } else {
1825           if (use_imm)
1826             __ Daddiu(dst, lhs, -rhs_imm);
1827           else
1828             __ Dsubu(dst, lhs, rhs_reg);
1829         }
1830       }
1831       break;
1832     }
1833     case Primitive::kPrimFloat:
1834     case Primitive::kPrimDouble: {
1835       FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
1836       FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
1837       FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
1838       if (instruction->IsAdd()) {
1839         if (type == Primitive::kPrimFloat)
1840           __ AddS(dst, lhs, rhs);
1841         else
1842           __ AddD(dst, lhs, rhs);
1843       } else if (instruction->IsSub()) {
1844         if (type == Primitive::kPrimFloat)
1845           __ SubS(dst, lhs, rhs);
1846         else
1847           __ SubD(dst, lhs, rhs);
1848       } else {
1849         LOG(FATAL) << "Unexpected floating-point binary operation";
1850       }
1851       break;
1852     }
1853     default:
1854       LOG(FATAL) << "Unexpected binary operation type " << type;
1855   }
1856 }
1857 
1858 void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
1859   DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
1860 
1861   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1862   Primitive::Type type = instr->GetResultType();
1863   switch (type) {
1864     case Primitive::kPrimInt:
1865     case Primitive::kPrimLong: {
1866       locations->SetInAt(0, Location::RequiresRegister());
1867       locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1868       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1869       break;
1870     }
1871     default:
1872       LOG(FATAL) << "Unexpected shift type " << type;
1873   }
1874 }
1875 
1876 void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
1877   DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
1878   LocationSummary* locations = instr->GetLocations();
1879   Primitive::Type type = instr->GetType();
1880 
1881   switch (type) {
1882     case Primitive::kPrimInt:
1883     case Primitive::kPrimLong: {
1884       GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1885       GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1886       Location rhs_location = locations->InAt(1);
1887 
1888       GpuRegister rhs_reg = ZERO;
1889       int64_t rhs_imm = 0;
1890       bool use_imm = rhs_location.IsConstant();
1891       if (use_imm) {
1892         rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
1893       } else {
1894         rhs_reg = rhs_location.AsRegister<GpuRegister>();
1895       }
1896 
1897       if (use_imm) {
1898         uint32_t shift_value = rhs_imm &
1899             (type == Primitive::kPrimInt ? kMaxIntShiftDistance : kMaxLongShiftDistance);
1900 
1901         if (shift_value == 0) {
1902           if (dst != lhs) {
1903             __ Move(dst, lhs);
1904           }
1905         } else if (type == Primitive::kPrimInt) {
1906           if (instr->IsShl()) {
1907             __ Sll(dst, lhs, shift_value);
1908           } else if (instr->IsShr()) {
1909             __ Sra(dst, lhs, shift_value);
1910           } else if (instr->IsUShr()) {
1911             __ Srl(dst, lhs, shift_value);
1912           } else {
1913             __ Rotr(dst, lhs, shift_value);
1914           }
1915         } else {
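          // 64-bit immediate shifts encode only a 5-bit amount; amounts of
          // 32..63 use the *32 instruction variants, which add 32 to the
          // encoded value.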
1916           if (shift_value < 32) {
1917             if (instr->IsShl()) {
1918               __ Dsll(dst, lhs, shift_value);
1919             } else if (instr->IsShr()) {
1920               __ Dsra(dst, lhs, shift_value);
1921             } else if (instr->IsUShr()) {
1922               __ Dsrl(dst, lhs, shift_value);
1923             } else {
1924               __ Drotr(dst, lhs, shift_value);
1925             }
1926           } else {
1927             shift_value -= 32;
1928             if (instr->IsShl()) {
1929               __ Dsll32(dst, lhs, shift_value);
1930             } else if (instr->IsShr()) {
1931               __ Dsra32(dst, lhs, shift_value);
1932             } else if (instr->IsUShr()) {
1933               __ Dsrl32(dst, lhs, shift_value);
1934             } else {
1935               __ Drotr32(dst, lhs, shift_value);
1936             }
1937           }
1938         }
1939       } else {
1940         if (type == Primitive::kPrimInt) {
1941           if (instr->IsShl()) {
1942             __ Sllv(dst, lhs, rhs_reg);
1943           } else if (instr->IsShr()) {
1944             __ Srav(dst, lhs, rhs_reg);
1945           } else if (instr->IsUShr()) {
1946             __ Srlv(dst, lhs, rhs_reg);
1947           } else {
1948             __ Rotrv(dst, lhs, rhs_reg);
1949           }
1950         } else {
1951           if (instr->IsShl()) {
1952             __ Dsllv(dst, lhs, rhs_reg);
1953           } else if (instr->IsShr()) {
1954             __ Dsrav(dst, lhs, rhs_reg);
1955           } else if (instr->IsUShr()) {
1956             __ Dsrlv(dst, lhs, rhs_reg);
1957           } else {
1958             __ Drotrv(dst, lhs, rhs_reg);
1959           }
1960         }
1961       }
1962       break;
1963     }
1964     default:
1965       LOG(FATAL) << "Unexpected shift operation type " << type;
1966   }
1967 }
1968 
1969 void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
1970   HandleBinaryOp(instruction);
1971 }
1972 
1973 void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
1974   HandleBinaryOp(instruction);
1975 }
1976 
1977 void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
1978   HandleBinaryOp(instruction);
1979 }
1980 
1981 void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
1982   HandleBinaryOp(instruction);
1983 }
1984 
1985 void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
1986   Primitive::Type type = instruction->GetType();
1987   bool object_array_get_with_read_barrier =
1988       kEmitCompilerReadBarrier && (type == Primitive::kPrimNot);
1989   LocationSummary* locations =
1990       new (GetGraph()->GetArena()) LocationSummary(instruction,
1991                                                    object_array_get_with_read_barrier
1992                                                        ? LocationSummary::kCallOnSlowPath
1993                                                        : LocationSummary::kNoCall);
1994   locations->SetInAt(0, Location::RequiresRegister());
1995   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1996   if (Primitive::IsFloatingPointType(type)) {
1997     locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1998   } else {
1999     // The output overlaps in the case of an object array get with
2000     // read barriers enabled: we do not want the move to overwrite the
2001     // array's location, as we need it to emit the read barrier.
2002     locations->SetOut(Location::RequiresRegister(),
2003                       object_array_get_with_read_barrier
2004                           ? Location::kOutputOverlap
2005                           : Location::kNoOutputOverlap);
2006   }
2007   // We need a temporary register for the read barrier marking slow
2008   // path in CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier.
2009   if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
2010     locations->AddTemp(Location::RequiresRegister());
2011   }
2012 }
2013 
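// Returns a callback that the load/store helpers invoke once the memory
// access has been emitted, so that the PC of the access is recorded for an
// implicit (fault-based) null check.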
2014 static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS64* codegen) {
2015   auto null_checker = [codegen, instruction]() {
2016     codegen->MaybeRecordImplicitNullCheck(instruction);
2017   };
2018   return null_checker;
2019 }
2020 
2021 void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
2022   LocationSummary* locations = instruction->GetLocations();
2023   Location obj_loc = locations->InAt(0);
2024   GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
2025   Location out_loc = locations->Out();
2026   Location index = locations->InAt(1);
2027   uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
2028   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
2029 
2030   Primitive::Type type = instruction->GetType();
2031   const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
2032                                         instruction->IsStringCharAt();
2033   switch (type) {
2034     case Primitive::kPrimBoolean: {
2035       GpuRegister out = out_loc.AsRegister<GpuRegister>();
2036       if (index.IsConstant()) {
2037         size_t offset =
2038             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
2039         __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset, null_checker);
2040       } else {
2041         __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
2042         __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset, null_checker);
2043       }
2044       break;
2045     }
2046 
2047     case Primitive::kPrimByte: {
2048       GpuRegister out = out_loc.AsRegister<GpuRegister>();
2049       if (index.IsConstant()) {
2050         size_t offset =
2051             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
2052         __ LoadFromOffset(kLoadSignedByte, out, obj, offset, null_checker);
2053       } else {
2054         __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
2055         __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset, null_checker);
2056       }
2057       break;
2058     }
2059 
2060     case Primitive::kPrimShort: {
2061       GpuRegister out = out_loc.AsRegister<GpuRegister>();
2062       if (index.IsConstant()) {
2063         size_t offset =
2064             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
2065         __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
2066       } else {
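        // Dlsa forms the element address as obj + (index << scale) in a
        // single instruction.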
2067         __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_2);
2068         __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
2069       }
2070       break;
2071     }
2072 
2073     case Primitive::kPrimChar: {
2074       GpuRegister out = out_loc.AsRegister<GpuRegister>();
2075       if (maybe_compressed_char_at) {
2076         uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
2077         __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
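        // Bit 0 of the count field is the compression flag; extract it to
        // choose between byte and halfword loads below.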
2078         __ Dext(TMP, TMP, 0, 1);
2079         static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
2080                       "Expecting 0=compressed, 1=uncompressed");
2081       }
2082       if (index.IsConstant()) {
2083         int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
2084         if (maybe_compressed_char_at) {
2085           Mips64Label uncompressed_load, done;
2086           __ Bnezc(TMP, &uncompressed_load);
2087           __ LoadFromOffset(kLoadUnsignedByte,
2088                             out,
2089                             obj,
2090                             data_offset + (const_index << TIMES_1));
2091           __ Bc(&done);
2092           __ Bind(&uncompressed_load);
2093           __ LoadFromOffset(kLoadUnsignedHalfword,
2094                             out,
2095                             obj,
2096                             data_offset + (const_index << TIMES_2));
2097           __ Bind(&done);
2098         } else {
2099           __ LoadFromOffset(kLoadUnsignedHalfword,
2100                             out,
2101                             obj,
2102                             data_offset + (const_index << TIMES_2),
2103                             null_checker);
2104         }
2105       } else {
2106         GpuRegister index_reg = index.AsRegister<GpuRegister>();
2107         if (maybe_compressed_char_at) {
2108           Mips64Label uncompressed_load, done;
2109           __ Bnezc(TMP, &uncompressed_load);
2110           __ Daddu(TMP, obj, index_reg);
2111           __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
2112           __ Bc(&done);
2113           __ Bind(&uncompressed_load);
2114           __ Dlsa(TMP, index_reg, obj, TIMES_2);
2115           __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
2116           __ Bind(&done);
2117         } else {
2118           __ Dlsa(TMP, index_reg, obj, TIMES_2);
2119           __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
2120         }
2121       }
2122       break;
2123     }
2124 
2125     case Primitive::kPrimInt: {
2126       DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
2127       GpuRegister out = out_loc.AsRegister<GpuRegister>();
2128       LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
2129       if (index.IsConstant()) {
2130         size_t offset =
2131             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
2132         __ LoadFromOffset(load_type, out, obj, offset, null_checker);
2133       } else {
2134         __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_4);
2135         __ LoadFromOffset(load_type, out, TMP, data_offset, null_checker);
2136       }
2137       break;
2138     }
2139 
2140     case Primitive::kPrimNot: {
2141       static_assert(
2142           sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
2143           "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
2144       // /* HeapReference<Object> */ out =
2145       //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
2146       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
2147         Location temp = locations->GetTemp(0);
2148         // Note that a potential implicit null check is handled in this
2149         // CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier call.
2150         codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
2151                                                         out_loc,
2152                                                         obj,
2153                                                         data_offset,
2154                                                         index,
2155                                                         temp,
2156                                                         /* needs_null_check */ true);
2157       } else {
2158         GpuRegister out = out_loc.AsRegister<GpuRegister>();
2159         if (index.IsConstant()) {
2160           size_t offset =
2161               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
2162           __ LoadFromOffset(kLoadUnsignedWord, out, obj, offset, null_checker);
2163           // If read barriers are enabled, emit read barriers other than
2164           // Baker's using a slow path (and also unpoison the loaded
2165           // reference, if heap poisoning is enabled).
2166           codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
2167         } else {
2168           __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_4);
2169           __ LoadFromOffset(kLoadUnsignedWord, out, TMP, data_offset, null_checker);
2170           // If read barriers are enabled, emit read barriers other than
2171           // Baker's using a slow path (and also unpoison the loaded
2172           // reference, if heap poisoning is enabled).
2173           codegen_->MaybeGenerateReadBarrierSlow(instruction,
2174                                                  out_loc,
2175                                                  out_loc,
2176                                                  obj_loc,
2177                                                  data_offset,
2178                                                  index);
2179         }
2180       }
2181       break;
2182     }
2183 
2184     case Primitive::kPrimLong: {
2185       GpuRegister out = out_loc.AsRegister<GpuRegister>();
2186       if (index.IsConstant()) {
2187         size_t offset =
2188             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
2189         __ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
2190       } else {
2191         __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_8);
2192         __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
2193       }
2194       break;
2195     }
2196 
2197     case Primitive::kPrimFloat: {
2198       FpuRegister out = out_loc.AsFpuRegister<FpuRegister>();
2199       if (index.IsConstant()) {
2200         size_t offset =
2201             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
2202         __ LoadFpuFromOffset(kLoadWord, out, obj, offset, null_checker);
2203       } else {
2204         __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_4);
2205         __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
2206       }
2207       break;
2208     }
2209 
2210     case Primitive::kPrimDouble: {
2211       FpuRegister out = out_loc.AsFpuRegister<FpuRegister>();
2212       if (index.IsConstant()) {
2213         size_t offset =
2214             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
2215         __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
2216       } else {
2217         __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_8);
2218         __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
2219       }
2220       break;
2221     }
2222 
2223     case Primitive::kPrimVoid:
2224       LOG(FATAL) << "Unreachable type " << instruction->GetType();
2225       UNREACHABLE();
2226   }
2227 }
2228 
2229 void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
2230   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2231   locations->SetInAt(0, Location::RequiresRegister());
2232   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2233 }
2234 
2235 void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
2236   LocationSummary* locations = instruction->GetLocations();
2237   uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
2238   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2239   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2240   __ LoadFromOffset(kLoadWord, out, obj, offset);
2241   codegen_->MaybeRecordImplicitNullCheck(instruction);
2242   // Mask out compression flag from String's array length.
2243   if (mirror::kUseStringCompression && instruction->IsStringLength()) {
2244     __ Srl(out, out, 1u);
2245   }
2246 }
2247 
2248 Location LocationsBuilderMIPS64::RegisterOrZeroConstant(HInstruction* instruction) {
2249   return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern())
2250       ? Location::ConstantLocation(instruction->AsConstant())
2251       : Location::RequiresRegister();
2252 }
2253 
2254 Location LocationsBuilderMIPS64::FpuRegisterOrConstantForStore(HInstruction* instruction) {
2255   // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register.
2256   // We can store a non-zero float or double constant without first loading it into the FPU,
2257   // but we should only prefer this if the constant has a single use.
2258   if (instruction->IsConstant() &&
2259       (instruction->AsConstant()->IsZeroBitPattern() ||
2260        instruction->GetUses().HasExactlyOneElement())) {
2261     return Location::ConstantLocation(instruction->AsConstant());
2262     // Otherwise fall through and require an FPU register for the constant.
2263   }
2264   return Location::RequiresFpuRegister();
2265 }
2266 
2267 void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
2268   Primitive::Type value_type = instruction->GetComponentType();
2269 
2270   bool needs_write_barrier =
2271       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
2272   bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
2273 
2274   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
2275       instruction,
2276       may_need_runtime_call_for_type_check ?
2277           LocationSummary::kCallOnSlowPath :
2278           LocationSummary::kNoCall);
2279 
2280   locations->SetInAt(0, Location::RequiresRegister());
2281   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
2282   if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
2283     locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
2284   } else {
2285     locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
2286   }
2287   if (needs_write_barrier) {
2288     // Temporary register for the write barrier.
2289     locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
2290   }
2291 }
2292 
2293 void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
2294   LocationSummary* locations = instruction->GetLocations();
2295   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2296   Location index = locations->InAt(1);
2297   Location value_location = locations->InAt(2);
2298   Primitive::Type value_type = instruction->GetComponentType();
2299   bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
2300   bool needs_write_barrier =
2301       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
2302   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
2303   GpuRegister base_reg = index.IsConstant() ? obj : TMP;
2304 
2305   switch (value_type) {
2306     case Primitive::kPrimBoolean:
2307     case Primitive::kPrimByte: {
2308       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
2309       if (index.IsConstant()) {
2310         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1;
2311       } else {
2312         __ Daddu(base_reg, obj, index.AsRegister<GpuRegister>());
2313       }
2314       if (value_location.IsConstant()) {
2315         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2316         __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker);
2317       } else {
2318         GpuRegister value = value_location.AsRegister<GpuRegister>();
2319         __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker);
2320       }
2321       break;
2322     }
2323 
2324     case Primitive::kPrimShort:
2325     case Primitive::kPrimChar: {
2326       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
2327       if (index.IsConstant()) {
2328         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
2329       } else {
2330         __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_2);
2331       }
2332       if (value_location.IsConstant()) {
2333         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2334         __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker);
2335       } else {
2336         GpuRegister value = value_location.AsRegister<GpuRegister>();
2337         __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker);
2338       }
2339       break;
2340     }
2341 
2342     case Primitive::kPrimInt: {
2343       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
2344       if (index.IsConstant()) {
2345         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2346       } else {
2347         __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
2348       }
2349       if (value_location.IsConstant()) {
2350         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2351         __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
2352       } else {
2353         GpuRegister value = value_location.AsRegister<GpuRegister>();
2354         __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
2355       }
2356       break;
2357     }
2358 
2359     case Primitive::kPrimNot: {
2360       if (value_location.IsConstant()) {
2361         // Just setting null.
2362         uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
2363         if (index.IsConstant()) {
2364           data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2365         } else {
2366           __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
2367         }
2368         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2369         DCHECK_EQ(value, 0);
2370         __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
2371         DCHECK(!needs_write_barrier);
2372         DCHECK(!may_need_runtime_call_for_type_check);
2373         break;
2374       }
2375 
2376       DCHECK(needs_write_barrier);
2377       GpuRegister value = value_location.AsRegister<GpuRegister>();
2378       GpuRegister temp1 = locations->GetTemp(0).AsRegister<GpuRegister>();
2379       GpuRegister temp2 = TMP;  // Doesn't need to survive slow path.
2380       uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2381       uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
2382       uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
2383       Mips64Label done;
2384       SlowPathCodeMIPS64* slow_path = nullptr;
2385 
2386       if (may_need_runtime_call_for_type_check) {
2387         slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS64(instruction);
2388         codegen_->AddSlowPath(slow_path);
2389         if (instruction->GetValueCanBeNull()) {
2390           Mips64Label non_zero;
2391           __ Bnezc(value, &non_zero);
2392           uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
2393           if (index.IsConstant()) {
2394             data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2395           } else {
2396             __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
2397           }
2398           __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
2399           __ Bc(&done);
2400           __ Bind(&non_zero);
2401         }
2402 
2403         // Note that when read barriers are enabled, the type checks
2404         // are performed without read barriers.  This is fine, even in
2405         // the case where a class object is in the from-space after
2406         // the flip, as a comparison involving such a type would not
2407         // produce a false positive; it may of course produce a false
2408         // negative, in which case we would take the ArraySet slow
2409         // path.
2410 
2411         // /* HeapReference<Class> */ temp1 = obj->klass_
2412         __ LoadFromOffset(kLoadUnsignedWord, temp1, obj, class_offset, null_checker);
2413         __ MaybeUnpoisonHeapReference(temp1);
2414 
2415         // /* HeapReference<Class> */ temp1 = temp1->component_type_
2416         __ LoadFromOffset(kLoadUnsignedWord, temp1, temp1, component_offset);
2417         // /* HeapReference<Class> */ temp2 = value->klass_
2418         __ LoadFromOffset(kLoadUnsignedWord, temp2, value, class_offset);
2419         // If heap poisoning is enabled, no need to unpoison `temp1`
2420         // nor `temp2`, as we are comparing two poisoned references.
2421 
2422         if (instruction->StaticTypeOfArrayIsObjectArray()) {
2423           Mips64Label do_put;
2424           __ Beqc(temp1, temp2, &do_put);
2425           // If heap poisoning is enabled, the `temp1` reference has
2426           // not been unpoisoned yet; unpoison it now.
2427           __ MaybeUnpoisonHeapReference(temp1);
2428 
2429           // /* HeapReference<Class> */ temp1 = temp1->super_class_
2430           __ LoadFromOffset(kLoadUnsignedWord, temp1, temp1, super_offset);
2431           // If heap poisoning is enabled, no need to unpoison
2432           // `temp1`, as we are comparing against null below.
2433           __ Bnezc(temp1, slow_path->GetEntryLabel());
2434           __ Bind(&do_put);
2435         } else {
2436           __ Bnec(temp1, temp2, slow_path->GetEntryLabel());
2437         }
2438       }
2439 
2440       GpuRegister source = value;
2441       if (kPoisonHeapReferences) {
2442         // Note that in the case where `value` is a null reference,
2443         // we do not enter this block, as a null reference does not
2444         // need poisoning.
2445         __ Move(temp1, value);
2446         __ PoisonHeapReference(temp1);
2447         source = temp1;
2448       }
2449 
2450       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
2451       if (index.IsConstant()) {
2452         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2453       } else {
2454         __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
2455       }
2456       __ StoreToOffset(kStoreWord, source, base_reg, data_offset);
2457 
2458       if (!may_need_runtime_call_for_type_check) {
2459         codegen_->MaybeRecordImplicitNullCheck(instruction);
2460       }
2461 
2462       codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
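      // The card mark is the GC write barrier: it dirties the card-table entry
      // covering `obj` so the collector re-scans this object for references
      // into other spaces. When the stored value may be null, the helper is
      // expected to guard the card mark with a null check on `value`.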
2463 
2464       if (done.IsLinked()) {
2465         __ Bind(&done);
2466       }
2467 
2468       if (slow_path != nullptr) {
2469         __ Bind(slow_path->GetExitLabel());
2470       }
2471       break;
2472     }
2473 
2474     case Primitive::kPrimLong: {
2475       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
2476       if (index.IsConstant()) {
2477         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
2478       } else {
2479         __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_8);
2480       }
2481       if (value_location.IsConstant()) {
2482         int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
2483         __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
2484       } else {
2485         GpuRegister value = value_location.AsRegister<GpuRegister>();
2486         __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
2487       }
2488       break;
2489     }
2490 
2491     case Primitive::kPrimFloat: {
2492       uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
2493       if (index.IsConstant()) {
2494         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2495       } else {
2496         __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
2497       }
2498       if (value_location.IsConstant()) {
2499         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2500         __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
2501       } else {
2502         FpuRegister value = value_location.AsFpuRegister<FpuRegister>();
2503         __ StoreFpuToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
2504       }
2505       break;
2506     }
2507 
2508     case Primitive::kPrimDouble: {
2509       uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
2510       if (index.IsConstant()) {
2511         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
2512       } else {
2513         __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_8);
2514       }
2515       if (value_location.IsConstant()) {
2516         int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
2517         __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
2518       } else {
2519         FpuRegister value = value_location.AsFpuRegister<FpuRegister>();
2520         __ StoreFpuToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
2521       }
2522       break;
2523     }
2524 
2525     case Primitive::kPrimVoid:
2526       LOG(FATAL) << "Unreachable type " << instruction->GetType();
2527       UNREACHABLE();
2528   }
2529 }
2530 
2531 void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
2532   RegisterSet caller_saves = RegisterSet::Empty();
2533   InvokeRuntimeCallingConvention calling_convention;
2534   caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2535   caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2536   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
2537   locations->SetInAt(0, Location::RequiresRegister());
2538   locations->SetInAt(1, Location::RequiresRegister());
2539 }
2540 
2541 void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
2542   LocationSummary* locations = instruction->GetLocations();
2543   BoundsCheckSlowPathMIPS64* slow_path =
2544       new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
2545   codegen_->AddSlowPath(slow_path);
2546 
2547   GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
2548   GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
2549 
2550   // length is limited by the maximum positive signed 32-bit integer.
2551   // Unsigned comparison of length and index checks for index < 0
2552   // and for length <= index simultaneously.
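  // For example, a negative index is seen as a very large unsigned value, so it
  // compares as >= length and the branch below takes the slow path, just like an
  // index that is too big.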
2553   __ Bgeuc(index, length, slow_path->GetEntryLabel());
2554 }
2555 
2556 // Temp is used for read barrier.
2557 static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
2558   if (kEmitCompilerReadBarrier &&
2559       (kUseBakerReadBarrier ||
2560        type_check_kind == TypeCheckKind::kAbstractClassCheck ||
2561        type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
2562        type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
2563     return 1;
2564   }
2565   return 0;
2566 }
2567 
2568 // Extra temp is used for read barrier.
2569 static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
2570   return 1 + NumberOfInstanceOfTemps(type_check_kind);
2571 }
2572 
2573 void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
2574   LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2575   bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
2576 
2577   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
2578   switch (type_check_kind) {
2579     case TypeCheckKind::kExactCheck:
2580     case TypeCheckKind::kAbstractClassCheck:
2581     case TypeCheckKind::kClassHierarchyCheck:
2582     case TypeCheckKind::kArrayObjectCheck:
2583       call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
2584           ? LocationSummary::kCallOnSlowPath
2585           : LocationSummary::kNoCall;  // In fact, call on a fatal (non-returning) slow path.
2586       break;
2587     case TypeCheckKind::kArrayCheck:
2588     case TypeCheckKind::kUnresolvedCheck:
2589     case TypeCheckKind::kInterfaceCheck:
2590       call_kind = LocationSummary::kCallOnSlowPath;
2591       break;
2592   }
2593 
2594   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2595   locations->SetInAt(0, Location::RequiresRegister());
2596   locations->SetInAt(1, Location::RequiresRegister());
2597   locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
2598 }
2599 
2600 void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
2601   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
2602   LocationSummary* locations = instruction->GetLocations();
2603   Location obj_loc = locations->InAt(0);
2604   GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
2605   GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
2606   Location temp_loc = locations->GetTemp(0);
2607   GpuRegister temp = temp_loc.AsRegister<GpuRegister>();
2608   const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
2609   DCHECK_LE(num_temps, 2u);
2610   Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
2611   const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2612   const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
2613   const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
2614   const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
2615   const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
2616   const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
2617   const uint32_t object_array_data_offset =
2618       mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
2619   Mips64Label done;
2620 
2621   // The slow path is never fatal when read barriers are enabled, since we may need to go to the
2622   // entrypoint for non-fatal cases caused by false negatives. The false negatives may come from
2623   // skipping the read barriers below, which is done for performance and code size reasons.
2624   bool is_type_check_slow_path_fatal = false;
2625   if (!kEmitCompilerReadBarrier) {
2626     is_type_check_slow_path_fatal =
2627         (type_check_kind == TypeCheckKind::kExactCheck ||
2628          type_check_kind == TypeCheckKind::kAbstractClassCheck ||
2629          type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
2630          type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
2631         !instruction->CanThrowIntoCatchBlock();
2632   }
2633   SlowPathCodeMIPS64* slow_path =
2634       new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
2635                                                            is_type_check_slow_path_fatal);
2636   codegen_->AddSlowPath(slow_path);
2637 
2638   // Avoid this check if we know `obj` is not null.
2639   if (instruction->MustDoNullCheck()) {
2640     __ Beqzc(obj, &done);
2641   }
2642 
2643   switch (type_check_kind) {
2644     case TypeCheckKind::kExactCheck:
2645     case TypeCheckKind::kArrayCheck: {
2646       // /* HeapReference<Class> */ temp = obj->klass_
2647       GenerateReferenceLoadTwoRegisters(instruction,
2648                                         temp_loc,
2649                                         obj_loc,
2650                                         class_offset,
2651                                         maybe_temp2_loc,
2652                                         kWithoutReadBarrier);
2653       // Jump to slow path for throwing the exception or doing a
2654       // more involved array check.
2655       __ Bnec(temp, cls, slow_path->GetEntryLabel());
2656       break;
2657     }
2658 
2659     case TypeCheckKind::kAbstractClassCheck: {
2660       // /* HeapReference<Class> */ temp = obj->klass_
2661       GenerateReferenceLoadTwoRegisters(instruction,
2662                                         temp_loc,
2663                                         obj_loc,
2664                                         class_offset,
2665                                         maybe_temp2_loc,
2666                                         kWithoutReadBarrier);
2667       // If the class is abstract, we eagerly fetch the super class of the
2668       // object to avoid doing a comparison we know will fail.
2669       Mips64Label loop;
2670       __ Bind(&loop);
2671       // /* HeapReference<Class> */ temp = temp->super_class_
2672       GenerateReferenceLoadOneRegister(instruction,
2673                                        temp_loc,
2674                                        super_offset,
2675                                        maybe_temp2_loc,
2676                                        kWithoutReadBarrier);
2677       // If the class reference currently in `temp` is null, jump to the slow path to throw the
2678       // exception.
2679       __ Beqzc(temp, slow_path->GetEntryLabel());
2680       // Otherwise, compare the classes.
2681       __ Bnec(temp, cls, &loop);
2682       break;
2683     }
2684 
2685     case TypeCheckKind::kClassHierarchyCheck: {
2686       // /* HeapReference<Class> */ temp = obj->klass_
2687       GenerateReferenceLoadTwoRegisters(instruction,
2688                                         temp_loc,
2689                                         obj_loc,
2690                                         class_offset,
2691                                         maybe_temp2_loc,
2692                                         kWithoutReadBarrier);
2693       // Walk over the class hierarchy to find a match.
2694       Mips64Label loop;
2695       __ Bind(&loop);
2696       __ Beqc(temp, cls, &done);
2697       // /* HeapReference<Class> */ temp = temp->super_class_
2698       GenerateReferenceLoadOneRegister(instruction,
2699                                        temp_loc,
2700                                        super_offset,
2701                                        maybe_temp2_loc,
2702                                        kWithoutReadBarrier);
2703       // If the class reference currently in `temp` is null, jump to the slow path to throw the
2704       // exception. Otherwise, jump to the beginning of the loop.
2705       __ Bnezc(temp, &loop);
2706       __ Bc(slow_path->GetEntryLabel());
2707       break;
2708     }
2709 
2710     case TypeCheckKind::kArrayObjectCheck: {
2711       // /* HeapReference<Class> */ temp = obj->klass_
2712       GenerateReferenceLoadTwoRegisters(instruction,
2713                                         temp_loc,
2714                                         obj_loc,
2715                                         class_offset,
2716                                         maybe_temp2_loc,
2717                                         kWithoutReadBarrier);
2718       // Do an exact check.
2719       __ Beqc(temp, cls, &done);
2720       // Otherwise, we need to check that the object's class is a non-primitive array.
2721       // /* HeapReference<Class> */ temp = temp->component_type_
2722       GenerateReferenceLoadOneRegister(instruction,
2723                                        temp_loc,
2724                                        component_offset,
2725                                        maybe_temp2_loc,
2726                                        kWithoutReadBarrier);
2727       // If the component type is null, jump to the slow path to throw the exception.
2728       __ Beqzc(temp, slow_path->GetEntryLabel());
2729       // Otherwise, the object is indeed an array, further check that this component
2730       // type is not a primitive type.
2731       __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
2732       static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
2733       __ Bnezc(temp, slow_path->GetEntryLabel());
2734       break;
2735     }
2736 
2737     case TypeCheckKind::kUnresolvedCheck:
2738       // We always go into the type check slow path for the unresolved check case.
2739       // We cannot directly call the CheckCast runtime entry point
2740       // without resorting to a type checking slow path here (i.e. by
2741       // calling InvokeRuntime directly), as it would require assigning
2742       // fixed registers for the inputs of this HInstanceOf
2743       // instruction (following the runtime calling convention), which
2744       // might be cluttered by the potential first read barrier
2745       // emission at the beginning of this method.
2746       __ Bc(slow_path->GetEntryLabel());
2747       break;
2748 
2749     case TypeCheckKind::kInterfaceCheck: {
2750       // Avoid read barriers to improve performance of the fast path. We cannot get false
2751       // positives by doing this.
2752       // /* HeapReference<Class> */ temp = obj->klass_
2753       GenerateReferenceLoadTwoRegisters(instruction,
2754                                         temp_loc,
2755                                         obj_loc,
2756                                         class_offset,
2757                                         maybe_temp2_loc,
2758                                         kWithoutReadBarrier);
2759       // /* HeapReference<Class> */ temp = temp->iftable_
2760       GenerateReferenceLoadTwoRegisters(instruction,
2761                                         temp_loc,
2762                                         temp_loc,
2763                                         iftable_offset,
2764                                         maybe_temp2_loc,
2765                                         kWithoutReadBarrier);
2766       // Iftable is never null.
2767       __ Lw(TMP, temp, array_length_offset);
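      // Each IfTable entry is an (interface class, method array) pair, which is
      // why the loop below advances by 2 * kHeapReferenceSize and counts the
      // remaining length down by 2.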
2768       // Loop through the iftable and check if any class matches.
2769       Mips64Label loop;
2770       __ Bind(&loop);
2771       __ Beqzc(TMP, slow_path->GetEntryLabel());
2772       __ Lwu(AT, temp, object_array_data_offset);
2773       __ MaybeUnpoisonHeapReference(AT);
2774       // Go to next interface.
2775       __ Daddiu(temp, temp, 2 * kHeapReferenceSize);
2776       __ Addiu(TMP, TMP, -2);
2777       // Compare the classes and continue the loop if they do not match.
2778       __ Bnec(AT, cls, &loop);
2779       break;
2780     }
2781   }
2782 
2783   __ Bind(&done);
2784   __ Bind(slow_path->GetExitLabel());
2785 }
2786 
2787 void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
2788   LocationSummary* locations =
2789       new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
2790   locations->SetInAt(0, Location::RequiresRegister());
2791   if (check->HasUses()) {
2792     locations->SetOut(Location::SameAsFirstInput());
2793   }
2794 }
2795 
2796 void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
2797   // We assume the class is not null.
2798   SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
2799       check->GetLoadClass(),
2800       check,
2801       check->GetDexPc(),
2802       true);
2803   codegen_->AddSlowPath(slow_path);
2804   GenerateClassInitializationCheck(slow_path,
2805                                    check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
2806 }
2807 
2808 void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
2809   Primitive::Type in_type = compare->InputAt(0)->GetType();
2810 
2811   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare);
2812 
2813   switch (in_type) {
2814     case Primitive::kPrimBoolean:
2815     case Primitive::kPrimByte:
2816     case Primitive::kPrimShort:
2817     case Primitive::kPrimChar:
2818     case Primitive::kPrimInt:
2819     case Primitive::kPrimLong:
2820       locations->SetInAt(0, Location::RequiresRegister());
2821       locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
2822       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2823       break;
2824 
2825     case Primitive::kPrimFloat:
2826     case Primitive::kPrimDouble:
2827       locations->SetInAt(0, Location::RequiresFpuRegister());
2828       locations->SetInAt(1, Location::RequiresFpuRegister());
2829       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2830       break;
2831 
2832     default:
2833       LOG(FATAL) << "Unexpected type for compare operation " << in_type;
2834   }
2835 }
2836 
2837 void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
2838   LocationSummary* locations = instruction->GetLocations();
2839   GpuRegister res = locations->Out().AsRegister<GpuRegister>();
2840   Primitive::Type in_type = instruction->InputAt(0)->GetType();
2841 
2842   //  0 if: left == right
2843   //  1 if: left  > right
2844   // -1 if: left  < right
2845   switch (in_type) {
2846     case Primitive::kPrimBoolean:
2847     case Primitive::kPrimByte:
2848     case Primitive::kPrimShort:
2849     case Primitive::kPrimChar:
2850     case Primitive::kPrimInt:
2851     case Primitive::kPrimLong: {
2852       GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
2853       Location rhs_location = locations->InAt(1);
2854       bool use_imm = rhs_location.IsConstant();
2855       GpuRegister rhs = ZERO;
2856       if (use_imm) {
2857         if (in_type == Primitive::kPrimLong) {
2858           int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
2859           if (value != 0) {
2860             rhs = AT;
2861             __ LoadConst64(rhs, value);
2862           }
2863         } else {
2864           int32_t value = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant()->AsConstant());
2865           if (value != 0) {
2866             rhs = AT;
2867             __ LoadConst32(rhs, value);
2868           }
2869         }
2870       } else {
2871         rhs = rhs_location.AsRegister<GpuRegister>();
2872       }
2873       __ Slt(TMP, lhs, rhs);
2874       __ Slt(res, rhs, lhs);
2875       __ Subu(res, res, TMP);
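      // The two set-on-less-than results encode all three outcomes:
      //   lhs <  rhs: TMP = 1, res = 0, res - TMP = -1
      //   lhs == rhs: TMP = 0, res = 0, res - TMP =  0
      //   lhs >  rhs: TMP = 0, res = 1, res - TMP =  1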
2876       break;
2877     }
2878 
2879     case Primitive::kPrimFloat: {
2880       FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
2881       FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
2882       Mips64Label done;
2883       __ CmpEqS(FTMP, lhs, rhs);
2884       __ LoadConst32(res, 0);
2885       __ Bc1nez(FTMP, &done);
2886       if (instruction->IsGtBias()) {
2887         __ CmpLtS(FTMP, lhs, rhs);
2888         __ LoadConst32(res, -1);
2889         __ Bc1nez(FTMP, &done);
2890         __ LoadConst32(res, 1);
2891       } else {
2892         __ CmpLtS(FTMP, rhs, lhs);
2893         __ LoadConst32(res, 1);
2894         __ Bc1nez(FTMP, &done);
2895         __ LoadConst32(res, -1);
2896       }
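      // With gt_bias the unordered (NaN) case falls through to res = 1, i.e.
      // NaN compares as "greater"; otherwise the mirrored CmpLtS leaves
      // res = -1, so NaN compares as "less".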
2897       __ Bind(&done);
2898       break;
2899     }
2900 
2901     case Primitive::kPrimDouble: {
2902       FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
2903       FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
2904       Mips64Label done;
2905       __ CmpEqD(FTMP, lhs, rhs);
2906       __ LoadConst32(res, 0);
2907       __ Bc1nez(FTMP, &done);
2908       if (instruction->IsGtBias()) {
2909         __ CmpLtD(FTMP, lhs, rhs);
2910         __ LoadConst32(res, -1);
2911         __ Bc1nez(FTMP, &done);
2912         __ LoadConst32(res, 1);
2913       } else {
2914         __ CmpLtD(FTMP, rhs, lhs);
2915         __ LoadConst32(res, 1);
2916         __ Bc1nez(FTMP, &done);
2917         __ LoadConst32(res, -1);
2918       }
2919       __ Bind(&done);
2920       break;
2921     }
2922 
2923     default:
2924       LOG(FATAL) << "Unimplemented compare type " << in_type;
2925   }
2926 }
2927 
2928 void LocationsBuilderMIPS64::HandleCondition(HCondition* instruction) {
2929   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2930   switch (instruction->InputAt(0)->GetType()) {
2931     default:
2932     case Primitive::kPrimLong:
2933       locations->SetInAt(0, Location::RequiresRegister());
2934       locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
2935       break;
2936 
2937     case Primitive::kPrimFloat:
2938     case Primitive::kPrimDouble:
2939       locations->SetInAt(0, Location::RequiresFpuRegister());
2940       locations->SetInAt(1, Location::RequiresFpuRegister());
2941       break;
2942   }
2943   if (!instruction->IsEmittedAtUseSite()) {
2944     locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2945   }
2946 }
2947 
2948 void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) {
2949   if (instruction->IsEmittedAtUseSite()) {
2950     return;
2951   }
2952 
2953   Primitive::Type type = instruction->InputAt(0)->GetType();
2954   LocationSummary* locations = instruction->GetLocations();
2955   switch (type) {
2956     default:
2957       // Integer case.
2958       GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations);
2959       return;
2960     case Primitive::kPrimLong:
2961       GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
2962       return;
2963     case Primitive::kPrimFloat:
2964     case Primitive::kPrimDouble:
2965       GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
2966       return;
2967   }
2968 }
2969 
2970 void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2971   DCHECK(instruction->IsDiv() || instruction->IsRem());
2972   Primitive::Type type = instruction->GetResultType();
2973 
2974   LocationSummary* locations = instruction->GetLocations();
2975   Location second = locations->InAt(1);
2976   DCHECK(second.IsConstant());
2977 
2978   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2979   GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
2980   int64_t imm = Int64FromConstant(second.GetConstant());
2981   DCHECK(imm == 1 || imm == -1);
2982 
2983   if (instruction->IsRem()) {
2984     __ Move(out, ZERO);
2985   } else {
2986     if (imm == -1) {
2987       if (type == Primitive::kPrimInt) {
2988         __ Subu(out, ZERO, dividend);
2989       } else {
2990         DCHECK_EQ(type, Primitive::kPrimLong);
2991         __ Dsubu(out, ZERO, dividend);
2992       }
2993     } else if (out != dividend) {
2994       __ Move(out, dividend);
2995     }
2996   }
2997 }
2998 
2999 void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
3000   DCHECK(instruction->IsDiv() || instruction->IsRem());
3001   Primitive::Type type = instruction->GetResultType();
3002 
3003   LocationSummary* locations = instruction->GetLocations();
3004   Location second = locations->InAt(1);
3005   DCHECK(second.IsConstant());
3006 
3007   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
3008   GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
3009   int64_t imm = Int64FromConstant(second.GetConstant());
3010   uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
3011   int ctz_imm = CTZ(abs_imm);
3012 
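  // Signed division by 2^k must round toward zero, but an arithmetic shift alone
  // rounds toward negative infinity. The sequences below therefore add a bias of
  // (2^k - 1), derived from the sign bits, to negative dividends before shifting.
  // For example, -7 / 4 (ctz_imm == 2): bias = 3, and (-7 + 3) >> 2 == -1,
  // matching the truncated quotient.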
3013   if (instruction->IsDiv()) {
3014     if (type == Primitive::kPrimInt) {
3015       if (ctz_imm == 1) {
3016         // Fast path for division by +/-2, which is very common.
3017         __ Srl(TMP, dividend, 31);
3018       } else {
3019         __ Sra(TMP, dividend, 31);
3020         __ Srl(TMP, TMP, 32 - ctz_imm);
3021       }
3022       __ Addu(out, dividend, TMP);
3023       __ Sra(out, out, ctz_imm);
3024       if (imm < 0) {
3025         __ Subu(out, ZERO, out);
3026       }
3027     } else {
3028       DCHECK_EQ(type, Primitive::kPrimLong);
3029       if (ctz_imm == 1) {
3030         // Fast path for division by +/-2, which is very common.
3031         __ Dsrl32(TMP, dividend, 31);
3032       } else {
3033         __ Dsra32(TMP, dividend, 31);
3034         if (ctz_imm > 32) {
3035           __ Dsrl(TMP, TMP, 64 - ctz_imm);
3036         } else {
3037           __ Dsrl32(TMP, TMP, 32 - ctz_imm);
3038         }
3039       }
3040       __ Daddu(out, dividend, TMP);
3041       if (ctz_imm < 32) {
3042         __ Dsra(out, out, ctz_imm);
3043       } else {
3044         __ Dsra32(out, out, ctz_imm - 32);
3045       }
3046       if (imm < 0) {
3047         __ Dsubu(out, ZERO, out);
3048       }
3049     }
3050   } else {
3051     if (type == Primitive::kPrimInt) {
3052       if (ctz_imm == 1) {
3053         // Fast path for modulo +/-2, which is very common.
3054         __ Sra(TMP, dividend, 31);
3055         __ Subu(out, dividend, TMP);
3056         __ Andi(out, out, 1);
3057         __ Addu(out, out, TMP);
3058       } else {
3059         __ Sra(TMP, dividend, 31);
3060         __ Srl(TMP, TMP, 32 - ctz_imm);
3061         __ Addu(out, dividend, TMP);
3062         if (IsUint<16>(abs_imm - 1)) {
3063           __ Andi(out, out, abs_imm - 1);
3064         } else {
3065           __ Sll(out, out, 32 - ctz_imm);
3066           __ Srl(out, out, 32 - ctz_imm);
3067         }
3068         __ Subu(out, out, TMP);
3069       }
3070     } else {
3071       DCHECK_EQ(type, Primitive::kPrimLong);
3072       if (ctz_imm == 1) {
3073         // Fast path for modulo +/-2, which is very common.
3074         __ Dsra32(TMP, dividend, 31);
3075         __ Dsubu(out, dividend, TMP);
3076         __ Andi(out, out, 1);
3077         __ Daddu(out, out, TMP);
3078       } else {
3079         __ Dsra32(TMP, dividend, 31);
3080         if (ctz_imm > 32) {
3081           __ Dsrl(TMP, TMP, 64 - ctz_imm);
3082         } else {
3083           __ Dsrl32(TMP, TMP, 32 - ctz_imm);
3084         }
3085         __ Daddu(out, dividend, TMP);
3086         if (IsUint<16>(abs_imm - 1)) {
3087           __ Andi(out, out, abs_imm - 1);
3088         } else {
3089           if (ctz_imm > 32) {
3090             __ Dsll(out, out, 64 - ctz_imm);
3091             __ Dsrl(out, out, 64 - ctz_imm);
3092           } else {
3093             __ Dsll32(out, out, 32 - ctz_imm);
3094             __ Dsrl32(out, out, 32 - ctz_imm);
3095           }
3096         }
3097         __ Dsubu(out, out, TMP);
3098       }
3099     }
3100   }
3101 }
3102 
3103 void InstructionCodeGeneratorMIPS64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
3104   DCHECK(instruction->IsDiv() || instruction->IsRem());
3105 
3106   LocationSummary* locations = instruction->GetLocations();
3107   Location second = locations->InAt(1);
3108   DCHECK(second.IsConstant());
3109 
3110   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
3111   GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
3112   int64_t imm = Int64FromConstant(second.GetConstant());
3113 
3114   Primitive::Type type = instruction->GetResultType();
3115   DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type;
3116 
3117   int64_t magic;
3118   int shift;
3119   CalculateMagicAndShiftForDivRem(imm,
3120                                   (type == Primitive::kPrimLong),
3121                                   &magic,
3122                                   &shift);
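  // This implements division by a constant via multiplication with a precomputed
  // reciprocal ("magic number"). As a sketch of the 32-bit case generated below:
  //   q = (int32_t)(((int64_t)dividend * magic) >> 32);   // MuhR6
  //   if (imm > 0 && magic < 0) q += dividend;             // sign correction
  //   if (imm < 0 && magic > 0) q -= dividend;
  //   q >>= shift;
  //   q += (uint32_t)q >> 31;                              // round toward zero
  //   result = IsDiv() ? q : dividend - q * imm;
  // The 64-bit case uses the corresponding doubleword instructions.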
3123 
3124   if (type == Primitive::kPrimInt) {
3125     __ LoadConst32(TMP, magic);
3126     __ MuhR6(TMP, dividend, TMP);
3127 
3128     if (imm > 0 && magic < 0) {
3129       __ Addu(TMP, TMP, dividend);
3130     } else if (imm < 0 && magic > 0) {
3131       __ Subu(TMP, TMP, dividend);
3132     }
3133 
3134     if (shift != 0) {
3135       __ Sra(TMP, TMP, shift);
3136     }
3137 
3138     if (instruction->IsDiv()) {
3139       __ Sra(out, TMP, 31);
3140       __ Subu(out, TMP, out);
3141     } else {
3142       __ Sra(AT, TMP, 31);
3143       __ Subu(AT, TMP, AT);
3144       __ LoadConst32(TMP, imm);
3145       __ MulR6(TMP, AT, TMP);
3146       __ Subu(out, dividend, TMP);
3147     }
3148   } else {
3149     __ LoadConst64(TMP, magic);
3150     __ Dmuh(TMP, dividend, TMP);
3151 
3152     if (imm > 0 && magic < 0) {
3153       __ Daddu(TMP, TMP, dividend);
3154     } else if (imm < 0 && magic > 0) {
3155       __ Dsubu(TMP, TMP, dividend);
3156     }
3157 
3158     if (shift >= 32) {
3159       __ Dsra32(TMP, TMP, shift - 32);
3160     } else if (shift > 0) {
3161       __ Dsra(TMP, TMP, shift);
3162     }
3163 
3164     if (instruction->IsDiv()) {
3165       __ Dsra32(out, TMP, 31);
3166       __ Dsubu(out, TMP, out);
3167     } else {
3168       __ Dsra32(AT, TMP, 31);
3169       __ Dsubu(AT, TMP, AT);
3170       __ LoadConst64(TMP, imm);
3171       __ Dmul(TMP, AT, TMP);
3172       __ Dsubu(out, dividend, TMP);
3173     }
3174   }
3175 }
3176 
3177 void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
3178   DCHECK(instruction->IsDiv() || instruction->IsRem());
3179   Primitive::Type type = instruction->GetResultType();
3180   DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type;
3181 
3182   LocationSummary* locations = instruction->GetLocations();
3183   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
3184   Location second = locations->InAt(1);
3185 
3186   if (second.IsConstant()) {
3187     int64_t imm = Int64FromConstant(second.GetConstant());
3188     if (imm == 0) {
3189       // Do not generate anything. DivZeroCheck would prevent any code from being executed.
3190     } else if (imm == 1 || imm == -1) {
3191       DivRemOneOrMinusOne(instruction);
3192     } else if (IsPowerOfTwo(AbsOrMin(imm))) {
3193       DivRemByPowerOfTwo(instruction);
3194     } else {
3195       DCHECK(imm <= -2 || imm >= 2);
3196       GenerateDivRemWithAnyConstant(instruction);
3197     }
3198   } else {
3199     GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
3200     GpuRegister divisor = second.AsRegister<GpuRegister>();
3201     if (instruction->IsDiv()) {
3202       if (type == Primitive::kPrimInt)
3203         __ DivR6(out, dividend, divisor);
3204       else
3205         __ Ddiv(out, dividend, divisor);
3206     } else {
3207       if (type == Primitive::kPrimInt)
3208         __ ModR6(out, dividend, divisor);
3209       else
3210         __ Dmod(out, dividend, divisor);
3211     }
3212   }
3213 }
3214 
3215 void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
3216   LocationSummary* locations =
3217       new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
3218   switch (div->GetResultType()) {
3219     case Primitive::kPrimInt:
3220     case Primitive::kPrimLong:
3221       locations->SetInAt(0, Location::RequiresRegister());
3222       locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
3223       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3224       break;
3225 
3226     case Primitive::kPrimFloat:
3227     case Primitive::kPrimDouble:
3228       locations->SetInAt(0, Location::RequiresFpuRegister());
3229       locations->SetInAt(1, Location::RequiresFpuRegister());
3230       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3231       break;
3232 
3233     default:
3234       LOG(FATAL) << "Unexpected div type " << div->GetResultType();
3235   }
3236 }
3237 
3238 void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
3239   Primitive::Type type = instruction->GetType();
3240   LocationSummary* locations = instruction->GetLocations();
3241 
3242   switch (type) {
3243     case Primitive::kPrimInt:
3244     case Primitive::kPrimLong:
3245       GenerateDivRemIntegral(instruction);
3246       break;
3247     case Primitive::kPrimFloat:
3248     case Primitive::kPrimDouble: {
3249       FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
3250       FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
3251       FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
3252       if (type == Primitive::kPrimFloat)
3253         __ DivS(dst, lhs, rhs);
3254       else
3255         __ DivD(dst, lhs, rhs);
3256       break;
3257     }
3258     default:
3259       LOG(FATAL) << "Unexpected div type " << type;
3260   }
3261 }
3262 
3263 void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
3264   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
3265   locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
3266 }
3267 
3268 void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
3269   SlowPathCodeMIPS64* slow_path =
3270       new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
3271   codegen_->AddSlowPath(slow_path);
3272   Location value = instruction->GetLocations()->InAt(0);
3273 
3274   Primitive::Type type = instruction->GetType();
3275 
3276   if (!Primitive::IsIntegralType(type)) {
3277     LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
3278     return;
3279   }
3280 
3281   if (value.IsConstant()) {
3282     int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
3283     if (divisor == 0) {
3284       __ Bc(slow_path->GetEntryLabel());
3285     } else {
3286       // A division by a non-zero constant is valid. We don't need to perform
3287       // any check, so simply fall through.
3288     }
3289   } else {
3290     __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
3291   }
3292 }
3293 
3294 void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
3295   LocationSummary* locations =
3296       new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
3297   locations->SetOut(Location::ConstantLocation(constant));
3298 }
3299 
3300 void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
3301   // Will be generated at use site.
3302 }
3303 
3304 void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
3305   exit->SetLocations(nullptr);
3306 }
3307 
3308 void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
3309 }
3310 
3311 void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
3312   LocationSummary* locations =
3313       new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
3314   locations->SetOut(Location::ConstantLocation(constant));
3315 }
3316 
3317 void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
3318   // Will be generated at use site.
3319 }
3320 
3321 void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
3322   DCHECK(!successor->IsExitBlock());
3323   HBasicBlock* block = got->GetBlock();
3324   HInstruction* previous = got->GetPrevious();
3325   HLoopInformation* info = block->GetLoopInformation();
3326 
3327   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
3328     codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
3329     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
3330     return;
3331   }
3332   if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
3333     GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
3334   }
3335   if (!codegen_->GoesToNextBlock(block, successor)) {
3336     __ Bc(codegen_->GetLabelOf(successor));
3337   }
3338 }
3339 
3340 void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
3341   got->SetLocations(nullptr);
3342 }
3343 
3344 void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
3345   HandleGoto(got, got->GetSuccessor());
3346 }
3347 
3348 void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
3349   try_boundary->SetLocations(nullptr);
3350 }
3351 
3352 void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
3353   HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
3354   if (!successor->IsExitBlock()) {
3355     HandleGoto(try_boundary, successor);
3356   }
3357 }
3358 
3359 void InstructionCodeGeneratorMIPS64::GenerateIntLongCompare(IfCondition cond,
3360                                                             bool is64bit,
3361                                                             LocationSummary* locations) {
3362   GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
3363   GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
3364   Location rhs_location = locations->InAt(1);
3365   GpuRegister rhs_reg = ZERO;
3366   int64_t rhs_imm = 0;
3367   bool use_imm = rhs_location.IsConstant();
3368   if (use_imm) {
3369     if (is64bit) {
3370       rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
3371     } else {
3372       rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
3373     }
3374   } else {
3375     rhs_reg = rhs_location.AsRegister<GpuRegister>();
3376   }
3377   int64_t rhs_imm_plus_one = rhs_imm + UINT64_C(1);
3378 
3379   switch (cond) {
3380     case kCondEQ:
3381     case kCondNE:
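      // For EQ/NE, dst is first made zero iff lhs == rhs (by subtracting or
      // XOR-ing the right-hand side); Sltiu(dst, dst, 1) then maps zero to 1
      // for EQ, while Sltu(dst, ZERO, dst) maps any non-zero value to 1 for NE.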
3382       if (use_imm && IsInt<16>(-rhs_imm)) {
3383         if (rhs_imm == 0) {
3384           if (cond == kCondEQ) {
3385             __ Sltiu(dst, lhs, 1);
3386           } else {
3387             __ Sltu(dst, ZERO, lhs);
3388           }
3389         } else {
3390           if (is64bit) {
3391             __ Daddiu(dst, lhs, -rhs_imm);
3392           } else {
3393             __ Addiu(dst, lhs, -rhs_imm);
3394           }
3395           if (cond == kCondEQ) {
3396             __ Sltiu(dst, dst, 1);
3397           } else {
3398             __ Sltu(dst, ZERO, dst);
3399           }
3400         }
3401       } else {
3402         if (use_imm && IsUint<16>(rhs_imm)) {
3403           __ Xori(dst, lhs, rhs_imm);
3404         } else {
3405           if (use_imm) {
3406             rhs_reg = TMP;
3407             __ LoadConst64(rhs_reg, rhs_imm);
3408           }
3409           __ Xor(dst, lhs, rhs_reg);
3410         }
3411         if (cond == kCondEQ) {
3412           __ Sltiu(dst, dst, 1);
3413         } else {
3414           __ Sltu(dst, ZERO, dst);
3415         }
3416       }
3417       break;
3418 
3419     case kCondLT:
3420     case kCondGE:
3421       if (use_imm && IsInt<16>(rhs_imm)) {
3422         __ Slti(dst, lhs, rhs_imm);
3423       } else {
3424         if (use_imm) {
3425           rhs_reg = TMP;
3426           __ LoadConst64(rhs_reg, rhs_imm);
3427         }
3428         __ Slt(dst, lhs, rhs_reg);
3429       }
3430       if (cond == kCondGE) {
3431         // Simulate lhs >= rhs via !(lhs < rhs) since there's
3432         // only the slt instruction but no sge.
3433         __ Xori(dst, dst, 1);
3434       }
3435       break;
3436 
3437     case kCondLE:
3438     case kCondGT:
3439       if (use_imm && IsInt<16>(rhs_imm_plus_one)) {
3440         // Simulate lhs <= rhs via lhs < rhs + 1.
3441         __ Slti(dst, lhs, rhs_imm_plus_one);
3442         if (cond == kCondGT) {
3443           // Simulate lhs > rhs via !(lhs <= rhs) since there's
3444           // only the slti instruction but no sgti.
3445           __ Xori(dst, dst, 1);
3446         }
3447       } else {
3448         if (use_imm) {
3449           rhs_reg = TMP;
3450           __ LoadConst64(rhs_reg, rhs_imm);
3451         }
3452         __ Slt(dst, rhs_reg, lhs);
3453         if (cond == kCondLE) {
3454           // Simulate lhs <= rhs via !(rhs < lhs) since there's
3455           // only the slt instruction but no sle.
3456           __ Xori(dst, dst, 1);
3457         }
3458       }
3459       break;
3460 
3461     case kCondB:
3462     case kCondAE:
3463       if (use_imm && IsInt<16>(rhs_imm)) {
3464         // Sltiu sign-extends its 16-bit immediate operand before
3465         // the comparison and thus lets us compare directly with
3466         // unsigned values in the ranges [0, 0x7fff] and
3467         // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
3468         __ Sltiu(dst, lhs, rhs_imm);
3469       } else {
3470         if (use_imm) {
3471           rhs_reg = TMP;
3472           __ LoadConst64(rhs_reg, rhs_imm);
3473         }
3474         __ Sltu(dst, lhs, rhs_reg);
3475       }
3476       if (cond == kCondAE) {
3477         // Simulate lhs >= rhs via !(lhs < rhs) since there's
3478         // only the sltu instruction but no sgeu.
3479         __ Xori(dst, dst, 1);
3480       }
3481       break;
3482 
3483     case kCondBE:
3484     case kCondA:
3485       if (use_imm && (rhs_imm_plus_one != 0) && IsInt<16>(rhs_imm_plus_one)) {
3486         // Simulate lhs <= rhs via lhs < rhs + 1.
3487         // Note that this only works if rhs + 1 does not overflow
3488         // to 0, hence the check above.
3489         // Sltiu sign-extends its 16-bit immediate operand before
3490         // the comparison and thus lets us compare directly with
3491         // unsigned values in the ranges [0, 0x7fff] and
3492         // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
3493         __ Sltiu(dst, lhs, rhs_imm_plus_one);
3494         if (cond == kCondA) {
3495           // Simulate lhs > rhs via !(lhs <= rhs) since there's
3496           // only the sltiu instruction but no sgtiu.
3497           __ Xori(dst, dst, 1);
3498         }
3499       } else {
3500         if (use_imm) {
3501           rhs_reg = TMP;
3502           __ LoadConst64(rhs_reg, rhs_imm);
3503         }
3504         __ Sltu(dst, rhs_reg, lhs);
3505         if (cond == kCondBE) {
3506           // Simulate lhs <= rhs via !(rhs < lhs) since there's
3507           // only the sltu instruction but no sleu.
3508           __ Xori(dst, dst, 1);
3509         }
3510       }
3511       break;
3512   }
3513 }
3514 
3515 void InstructionCodeGeneratorMIPS64::GenerateIntLongCompareAndBranch(IfCondition cond,
3516                                                                      bool is64bit,
3517                                                                      LocationSummary* locations,
3518                                                                      Mips64Label* label) {
3519   GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
3520   Location rhs_location = locations->InAt(1);
3521   GpuRegister rhs_reg = ZERO;
3522   int64_t rhs_imm = 0;
3523   bool use_imm = rhs_location.IsConstant();
3524   if (use_imm) {
3525     if (is64bit) {
3526       rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
3527     } else {
3528       rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
3529     }
3530   } else {
3531     rhs_reg = rhs_location.AsRegister<GpuRegister>();
3532   }
3533 
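  // Comparing against an immediate zero allows the compact compare-with-zero
  // branches below. The unsigned conditions degenerate: B (lhs < 0 unsigned) is
  // never taken and AE (lhs >= 0 unsigned) is always taken.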
3534   if (use_imm && rhs_imm == 0) {
3535     switch (cond) {
3536       case kCondEQ:
3537       case kCondBE:  // <= 0 if zero
3538         __ Beqzc(lhs, label);
3539         break;
3540       case kCondNE:
3541       case kCondA:  // > 0 if non-zero
3542         __ Bnezc(lhs, label);
3543         break;
3544       case kCondLT:
3545         __ Bltzc(lhs, label);
3546         break;
3547       case kCondGE:
3548         __ Bgezc(lhs, label);
3549         break;
3550       case kCondLE:
3551         __ Blezc(lhs, label);
3552         break;
3553       case kCondGT:
3554         __ Bgtzc(lhs, label);
3555         break;
3556       case kCondB:  // always false
3557         break;
3558       case kCondAE:  // always true
3559         __ Bc(label);
3560         break;
3561     }
3562   } else {
3563     if (use_imm) {
3564       rhs_reg = TMP;
3565       __ LoadConst64(rhs_reg, rhs_imm);
3566     }
3567     switch (cond) {
3568       case kCondEQ:
3569         __ Beqc(lhs, rhs_reg, label);
3570         break;
3571       case kCondNE:
3572         __ Bnec(lhs, rhs_reg, label);
3573         break;
3574       case kCondLT:
3575         __ Bltc(lhs, rhs_reg, label);
3576         break;
3577       case kCondGE:
3578         __ Bgec(lhs, rhs_reg, label);
3579         break;
3580       case kCondLE:
3581         __ Bgec(rhs_reg, lhs, label);
3582         break;
3583       case kCondGT:
3584         __ Bltc(rhs_reg, lhs, label);
3585         break;
3586       case kCondB:
3587         __ Bltuc(lhs, rhs_reg, label);
3588         break;
3589       case kCondAE:
3590         __ Bgeuc(lhs, rhs_reg, label);
3591         break;
3592       case kCondBE:
3593         __ Bgeuc(rhs_reg, lhs, label);
3594         break;
3595       case kCondA:
3596         __ Bltuc(rhs_reg, lhs, label);
3597         break;
3598     }
3599   }
3600 }
3601 
3602 void InstructionCodeGeneratorMIPS64::GenerateFpCompare(IfCondition cond,
3603                                                        bool gt_bias,
3604                                                        Primitive::Type type,
3605                                                        LocationSummary* locations) {
3606   GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
3607   FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
3608   FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
3609   if (type == Primitive::kPrimFloat) {
3610     switch (cond) {
3611       case kCondEQ:
3612         __ CmpEqS(FTMP, lhs, rhs);
3613         __ Mfc1(dst, FTMP);
3614         __ Andi(dst, dst, 1);
3615         break;
3616       case kCondNE:
3617         __ CmpEqS(FTMP, lhs, rhs);
3618         __ Mfc1(dst, FTMP);
3619         __ Addiu(dst, dst, 1);
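        // CmpEqS writes all ones on success, so after Mfc1 the register holds
        // -1 for "equal" and 0 for "not equal"; adding 1 yields the NE result
        // (0 or 1) directly.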
3620         break;
3621       case kCondLT:
3622         if (gt_bias) {
3623           __ CmpLtS(FTMP, lhs, rhs);
3624         } else {
3625           __ CmpUltS(FTMP, lhs, rhs);
3626         }
3627         __ Mfc1(dst, FTMP);
3628         __ Andi(dst, dst, 1);
3629         break;
3630       case kCondLE:
3631         if (gt_bias) {
3632           __ CmpLeS(FTMP, lhs, rhs);
3633         } else {
3634           __ CmpUleS(FTMP, lhs, rhs);
3635         }
3636         __ Mfc1(dst, FTMP);
3637         __ Andi(dst, dst, 1);
3638         break;
3639       case kCondGT:
3640         if (gt_bias) {
3641           __ CmpUltS(FTMP, rhs, lhs);
3642         } else {
3643           __ CmpLtS(FTMP, rhs, lhs);
3644         }
3645         __ Mfc1(dst, FTMP);
3646         __ Andi(dst, dst, 1);
3647         break;
3648       case kCondGE:
3649         if (gt_bias) {
3650           __ CmpUleS(FTMP, rhs, lhs);
3651         } else {
3652           __ CmpLeS(FTMP, rhs, lhs);
3653         }
3654         __ Mfc1(dst, FTMP);
3655         __ Andi(dst, dst, 1);
3656         break;
3657       default:
3658         LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
3659         UNREACHABLE();
3660     }
3661   } else {
3662     DCHECK_EQ(type, Primitive::kPrimDouble);
3663     switch (cond) {
3664       case kCondEQ:
3665         __ CmpEqD(FTMP, lhs, rhs);
3666         __ Mfc1(dst, FTMP);
3667         __ Andi(dst, dst, 1);
3668         break;
3669       case kCondNE:
3670         __ CmpEqD(FTMP, lhs, rhs);
3671         __ Mfc1(dst, FTMP);
3672         __ Addiu(dst, dst, 1);
3673         break;
3674       case kCondLT:
3675         if (gt_bias) {
3676           __ CmpLtD(FTMP, lhs, rhs);
3677         } else {
3678           __ CmpUltD(FTMP, lhs, rhs);
3679         }
3680         __ Mfc1(dst, FTMP);
3681         __ Andi(dst, dst, 1);
3682         break;
3683       case kCondLE:
3684         if (gt_bias) {
3685           __ CmpLeD(FTMP, lhs, rhs);
3686         } else {
3687           __ CmpUleD(FTMP, lhs, rhs);
3688         }
3689         __ Mfc1(dst, FTMP);
3690         __ Andi(dst, dst, 1);
3691         break;
3692       case kCondGT:
3693         if (gt_bias) {
3694           __ CmpUltD(FTMP, rhs, lhs);
3695         } else {
3696           __ CmpLtD(FTMP, rhs, lhs);
3697         }
3698         __ Mfc1(dst, FTMP);
3699         __ Andi(dst, dst, 1);
3700         break;
3701       case kCondGE:
3702         if (gt_bias) {
3703           __ CmpUleD(FTMP, rhs, lhs);
3704         } else {
3705           __ CmpLeD(FTMP, rhs, lhs);
3706         }
3707         __ Mfc1(dst, FTMP);
3708         __ Andi(dst, dst, 1);
3709         break;
3710       default:
3711         LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
3712         UNREACHABLE();
3713     }
3714   }
3715 }
3716 
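// Branching variant of the comparison above: the same CMP.cond.fmt selection
// is used, but the result is consumed directly by BC1NEZ/BC1EQZ (branch if
// bit 0 of FTMP is non-zero/zero), so no GPR materialization is needed.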
3717 void InstructionCodeGeneratorMIPS64::GenerateFpCompareAndBranch(IfCondition cond,
3718                                                                 bool gt_bias,
3719                                                                 Primitive::Type type,
3720                                                                 LocationSummary* locations,
3721                                                                 Mips64Label* label) {
3722   FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
3723   FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
3724   if (type == Primitive::kPrimFloat) {
3725     switch (cond) {
3726       case kCondEQ:
3727         __ CmpEqS(FTMP, lhs, rhs);
3728         __ Bc1nez(FTMP, label);
3729         break;
3730       case kCondNE:
3731         __ CmpEqS(FTMP, lhs, rhs);
3732         __ Bc1eqz(FTMP, label);
3733         break;
3734       case kCondLT:
3735         if (gt_bias) {
3736           __ CmpLtS(FTMP, lhs, rhs);
3737         } else {
3738           __ CmpUltS(FTMP, lhs, rhs);
3739         }
3740         __ Bc1nez(FTMP, label);
3741         break;
3742       case kCondLE:
3743         if (gt_bias) {
3744           __ CmpLeS(FTMP, lhs, rhs);
3745         } else {
3746           __ CmpUleS(FTMP, lhs, rhs);
3747         }
3748         __ Bc1nez(FTMP, label);
3749         break;
3750       case kCondGT:
3751         if (gt_bias) {
3752           __ CmpUltS(FTMP, rhs, lhs);
3753         } else {
3754           __ CmpLtS(FTMP, rhs, lhs);
3755         }
3756         __ Bc1nez(FTMP, label);
3757         break;
3758       case kCondGE:
3759         if (gt_bias) {
3760           __ CmpUleS(FTMP, rhs, lhs);
3761         } else {
3762           __ CmpLeS(FTMP, rhs, lhs);
3763         }
3764         __ Bc1nez(FTMP, label);
3765         break;
3766       default:
3767         LOG(FATAL) << "Unexpected non-floating-point condition";
3768     }
3769   } else {
3770     DCHECK_EQ(type, Primitive::kPrimDouble);
3771     switch (cond) {
3772       case kCondEQ:
3773         __ CmpEqD(FTMP, lhs, rhs);
3774         __ Bc1nez(FTMP, label);
3775         break;
3776       case kCondNE:
3777         __ CmpEqD(FTMP, lhs, rhs);
3778         __ Bc1eqz(FTMP, label);
3779         break;
3780       case kCondLT:
3781         if (gt_bias) {
3782           __ CmpLtD(FTMP, lhs, rhs);
3783         } else {
3784           __ CmpUltD(FTMP, lhs, rhs);
3785         }
3786         __ Bc1nez(FTMP, label);
3787         break;
3788       case kCondLE:
3789         if (gt_bias) {
3790           __ CmpLeD(FTMP, lhs, rhs);
3791         } else {
3792           __ CmpUleD(FTMP, lhs, rhs);
3793         }
3794         __ Bc1nez(FTMP, label);
3795         break;
3796       case kCondGT:
3797         if (gt_bias) {
3798           __ CmpUltD(FTMP, rhs, lhs);
3799         } else {
3800           __ CmpLtD(FTMP, rhs, lhs);
3801         }
3802         __ Bc1nez(FTMP, label);
3803         break;
3804       case kCondGE:
3805         if (gt_bias) {
3806           __ CmpUleD(FTMP, rhs, lhs);
3807         } else {
3808           __ CmpLeD(FTMP, rhs, lhs);
3809         }
3810         __ Bc1nez(FTMP, label);
3811         break;
3812       default:
3813         LOG(FATAL) << "Unexpected non-floating-point condition";
3814     }
3815   }
3816 }
3817 
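// Shared branch emission for If, Deoptimize and Select. A null `true_target`
// (or `false_target`) means that successor is the fall-through block, so no
// branch is emitted for it; constant conditions degenerate to an
// unconditional Bc or to nothing at all.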
3818 void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
3819                                                            size_t condition_input_index,
3820                                                            Mips64Label* true_target,
3821                                                            Mips64Label* false_target) {
3822   HInstruction* cond = instruction->InputAt(condition_input_index);
3823 
3824   if (true_target == nullptr && false_target == nullptr) {
3825     // Nothing to do. The code always falls through.
3826     return;
3827   } else if (cond->IsIntConstant()) {
3828     // Constant condition, statically compared against "true" (integer value 1).
3829     if (cond->AsIntConstant()->IsTrue()) {
3830       if (true_target != nullptr) {
3831         __ Bc(true_target);
3832       }
3833     } else {
3834       DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
3835       if (false_target != nullptr) {
3836         __ Bc(false_target);
3837       }
3838     }
3839     return;
3840   }
3841 
3842   // The following code generates these patterns:
3843   //  (1) true_target == nullptr && false_target != nullptr
3844   //        - opposite condition true => branch to false_target
3845   //  (2) true_target != nullptr && false_target == nullptr
3846   //        - condition true => branch to true_target
3847   //  (3) true_target != nullptr && false_target != nullptr
3848   //        - condition true => branch to true_target
3849   //        - branch to false_target
3850   if (IsBooleanValueOrMaterializedCondition(cond)) {
3851     // The condition instruction has been materialized, compare the output to 0.
3852     Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
3853     DCHECK(cond_val.IsRegister());
3854     if (true_target == nullptr) {
3855       __ Beqzc(cond_val.AsRegister<GpuRegister>(), false_target);
3856     } else {
3857       __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
3858     }
3859   } else {
3860     // The condition instruction has not been materialized, use its inputs as
3861     // the comparison and its condition as the branch condition.
3862     HCondition* condition = cond->AsCondition();
3863     Primitive::Type type = condition->InputAt(0)->GetType();
3864     LocationSummary* locations = cond->GetLocations();
3865     IfCondition if_cond = condition->GetCondition();
3866     Mips64Label* branch_target = true_target;
3867 
3868     if (true_target == nullptr) {
3869       if_cond = condition->GetOppositeCondition();
3870       branch_target = false_target;
3871     }
3872 
3873     switch (type) {
3874       default:
3875         GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target);
3876         break;
3877       case Primitive::kPrimLong:
3878         GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target);
3879         break;
3880       case Primitive::kPrimFloat:
3881       case Primitive::kPrimDouble:
3882         GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target);
3883         break;
3884     }
3885   }
3886 
3887   // If neither branch falls through (case 3), the conditional branch to `true_target`
3888   // was already emitted (case 2) and we need to emit a jump to `false_target`.
3889   if (true_target != nullptr && false_target != nullptr) {
3890     __ Bc(false_target);
3891   }
3892 }
3893 
3894 void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
3895   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
3896   if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
3897     locations->SetInAt(0, Location::RequiresRegister());
3898   }
3899 }
3900 
3901 void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
3902   HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
3903   HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
3904   Mips64Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
3905       nullptr : codegen_->GetLabelOf(true_successor);
3906   Mips64Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
3907       nullptr : codegen_->GetLabelOf(false_successor);
3908   GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
3909 }
3910 
3911 void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
3912   LocationSummary* locations = new (GetGraph()->GetArena())
3913       LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
3914   InvokeRuntimeCallingConvention calling_convention;
3915   RegisterSet caller_saves = RegisterSet::Empty();
3916   caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3917   locations->SetCustomSlowPathCallerSaves(caller_saves);
3918   if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
3919     locations->SetInAt(0, Location::RequiresRegister());
3920   }
3921 }
3922 
3923 void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
3924   SlowPathCodeMIPS64* slow_path =
3925       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
3926   GenerateTestAndBranch(deoptimize,
3927                         /* condition_input_index */ 0,
3928                         slow_path->GetEntryLabel(),
3929                         /* false_target */ nullptr);
3930 }
3931 
3932 void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
3933   LocationSummary* locations = new (GetGraph()->GetArena())
3934       LocationSummary(flag, LocationSummary::kNoCall);
3935   locations->SetOut(Location::RequiresRegister());
3936 }
3937 
3938 void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
3939   __ LoadFromOffset(kLoadWord,
3940                     flag->GetLocations()->Out().AsRegister<GpuRegister>(),
3941                     SP,
3942                     codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
3943 }
3944 
3945 void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
3946   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
3947   if (Primitive::IsFloatingPointType(select->GetType())) {
3948     locations->SetInAt(0, Location::RequiresFpuRegister());
3949     locations->SetInAt(1, Location::RequiresFpuRegister());
3950   } else {
3951     locations->SetInAt(0, Location::RequiresRegister());
3952     locations->SetInAt(1, Location::RequiresRegister());
3953   }
3954   if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) {
3955     locations->SetInAt(2, Location::RequiresRegister());
3956   }
3957   locations->SetOut(Location::SameAsFirstInput());
3958 }
3959 
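// The output was allocated as SameAsFirstInput, so on entry it already holds
// the "false" value (HSelect input 0). When the condition is false we branch
// over the move and keep it; otherwise we fall through and overwrite the
// output with the "true" value (input 1).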
3960 void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) {
3961   LocationSummary* locations = select->GetLocations();
3962   Mips64Label false_target;
3963   GenerateTestAndBranch(select,
3964                         /* condition_input_index */ 2,
3965                         /* true_target */ nullptr,
3966                         &false_target);
3967   codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
3968   __ Bind(&false_target);
3969 }
3970 
3971 void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
3972   new (GetGraph()->GetArena()) LocationSummary(info);
3973 }
3974 
3975 void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo*) {
3976   // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
3977 }
3978 
3979 void CodeGeneratorMIPS64::GenerateNop() {
3980   __ Nop();
3981 }
3982 
3983 void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
3984                                             const FieldInfo& field_info) {
3985   Primitive::Type field_type = field_info.GetFieldType();
3986   bool object_field_get_with_read_barrier =
3987       kEmitCompilerReadBarrier && (field_type == Primitive::kPrimNot);
3988   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3989       instruction,
3990       object_field_get_with_read_barrier
3991           ? LocationSummary::kCallOnSlowPath
3992           : LocationSummary::kNoCall);
3993   locations->SetInAt(0, Location::RequiresRegister());
3994   if (Primitive::IsFloatingPointType(instruction->GetType())) {
3995     locations->SetOut(Location::RequiresFpuRegister());
3996   } else {
3997     // The output overlaps in the case of an object field get with
3998     // read barriers enabled: we do not want the move to overwrite the
3999     // object's location, as we need it to emit the read barrier.
4000     locations->SetOut(Location::RequiresRegister(),
4001                       object_field_get_with_read_barrier
4002                           ? Location::kOutputOverlap
4003                           : Location::kNoOutputOverlap);
4004   }
4005   if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
4006     // We need a temporary register for the read barrier marking slow
4007     // path in CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier.
4008     locations->AddTemp(Location::RequiresRegister());
4009   }
4010 }
4011 
4012 void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
4013                                                     const FieldInfo& field_info) {
4014   Primitive::Type type = field_info.GetFieldType();
4015   LocationSummary* locations = instruction->GetLocations();
4016   Location obj_loc = locations->InAt(0);
4017   GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
4018   Location dst_loc = locations->Out();
4019   LoadOperandType load_type = kLoadUnsignedByte;
4020   bool is_volatile = field_info.IsVolatile();
4021   uint32_t offset = field_info.GetFieldOffset().Uint32Value();
4022   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
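  // `null_checker` is passed to the load helpers below so that an implicit
  // null check, if any, is recorded against the first instruction that
  // actually dereferences `obj`.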
4023 
4024   switch (type) {
4025     case Primitive::kPrimBoolean:
4026       load_type = kLoadUnsignedByte;
4027       break;
4028     case Primitive::kPrimByte:
4029       load_type = kLoadSignedByte;
4030       break;
4031     case Primitive::kPrimShort:
4032       load_type = kLoadSignedHalfword;
4033       break;
4034     case Primitive::kPrimChar:
4035       load_type = kLoadUnsignedHalfword;
4036       break;
4037     case Primitive::kPrimInt:
4038     case Primitive::kPrimFloat:
4039       load_type = kLoadWord;
4040       break;
4041     case Primitive::kPrimLong:
4042     case Primitive::kPrimDouble:
4043       load_type = kLoadDoubleword;
4044       break;
4045     case Primitive::kPrimNot:
4046       load_type = kLoadUnsignedWord;
4047       break;
4048     case Primitive::kPrimVoid:
4049       LOG(FATAL) << "Unreachable type " << type;
4050       UNREACHABLE();
4051   }
4052   if (!Primitive::IsFloatingPointType(type)) {
4053     DCHECK(dst_loc.IsRegister());
4054     GpuRegister dst = dst_loc.AsRegister<GpuRegister>();
4055     if (type == Primitive::kPrimNot) {
4056       // /* HeapReference<Object> */ dst = *(obj + offset)
4057       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
4058         Location temp_loc = locations->GetTemp(0);
4059         // Note that a potential implicit null check is handled in this
4060         // CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier call.
4061         codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
4062                                                         dst_loc,
4063                                                         obj,
4064                                                         offset,
4065                                                         temp_loc,
4066                                                         /* needs_null_check */ true);
4067         if (is_volatile) {
4068           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
4069         }
4070       } else {
4071         __ LoadFromOffset(kLoadUnsignedWord, dst, obj, offset, null_checker);
4072         if (is_volatile) {
4073           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
4074         }
4075         // If read barriers are enabled, emit read barriers other than
4076         // Baker's using a slow path (and also unpoison the loaded
4077         // reference, if heap poisoning is enabled).
4078         codegen_->MaybeGenerateReadBarrierSlow(instruction, dst_loc, dst_loc, obj_loc, offset);
4079       }
4080     } else {
4081       __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
4082     }
4083   } else {
4084     DCHECK(dst_loc.IsFpuRegister());
4085     FpuRegister dst = dst_loc.AsFpuRegister<FpuRegister>();
4086     __ LoadFpuFromOffset(load_type, dst, obj, offset, null_checker);
4087   }
4088 
4089   // Memory barriers, in the case of references, are handled in the
4090   // previous switch statement.
4091   if (is_volatile && (type != Primitive::kPrimNot)) {
4092     GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
4093   }
4094 }
4095 
4096 void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
4097                                             const FieldInfo& field_info ATTRIBUTE_UNUSED) {
4098   LocationSummary* locations =
4099       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4100   locations->SetInAt(0, Location::RequiresRegister());
4101   if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
4102     locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
4103   } else {
4104     locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1)));
4105   }
4106 }
4107 
4108 void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
4109                                                     const FieldInfo& field_info,
4110                                                     bool value_can_be_null) {
4111   Primitive::Type type = field_info.GetFieldType();
4112   LocationSummary* locations = instruction->GetLocations();
4113   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
4114   Location value_location = locations->InAt(1);
4115   StoreOperandType store_type = kStoreByte;
4116   bool is_volatile = field_info.IsVolatile();
4117   uint32_t offset = field_info.GetFieldOffset().Uint32Value();
4118   bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
4119   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
4120 
4121   switch (type) {
4122     case Primitive::kPrimBoolean:
4123     case Primitive::kPrimByte:
4124       store_type = kStoreByte;
4125       break;
4126     case Primitive::kPrimShort:
4127     case Primitive::kPrimChar:
4128       store_type = kStoreHalfword;
4129       break;
4130     case Primitive::kPrimInt:
4131     case Primitive::kPrimFloat:
4132     case Primitive::kPrimNot:
4133       store_type = kStoreWord;
4134       break;
4135     case Primitive::kPrimLong:
4136     case Primitive::kPrimDouble:
4137       store_type = kStoreDoubleword;
4138       break;
4139     case Primitive::kPrimVoid:
4140       LOG(FATAL) << "Unreachable type " << type;
4141       UNREACHABLE();
4142   }
4143 
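  // Volatile stores are bracketed with barriers: kAnyStore (release) before
  // the store below and kAnyAny after it.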
4144   if (is_volatile) {
4145     GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
4146   }
4147 
4148   if (value_location.IsConstant()) {
4149     int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
4150     __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker);
4151   } else {
4152     if (!Primitive::IsFloatingPointType(type)) {
4153       DCHECK(value_location.IsRegister());
4154       GpuRegister src = value_location.AsRegister<GpuRegister>();
4155       if (kPoisonHeapReferences && needs_write_barrier) {
4156         // Note that in the case where `value` is a null reference,
4157         // we do not enter this block, as a null reference does not
4158         // need poisoning.
4159         DCHECK_EQ(type, Primitive::kPrimNot);
4160         __ PoisonHeapReference(TMP, src);
4161         __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
4162       } else {
4163         __ StoreToOffset(store_type, src, obj, offset, null_checker);
4164       }
4165     } else {
4166       DCHECK(value_location.IsFpuRegister());
4167       FpuRegister src = value_location.AsFpuRegister<FpuRegister>();
4168       __ StoreFpuToOffset(store_type, src, obj, offset, null_checker);
4169     }
4170   }
4171 
4172   if (needs_write_barrier) {
4173     DCHECK(value_location.IsRegister());
4174     GpuRegister src = value_location.AsRegister<GpuRegister>();
4175     codegen_->MarkGCCard(obj, src, value_can_be_null);
4176   }
4177 
4178   if (is_volatile) {
4179     GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
4180   }
4181 }
4182 
4183 void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
4184   HandleFieldGet(instruction, instruction->GetFieldInfo());
4185 }
4186 
4187 void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
4188   HandleFieldGet(instruction, instruction->GetFieldInfo());
4189 }
4190 
4191 void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
4192   HandleFieldSet(instruction, instruction->GetFieldInfo());
4193 }
4194 
4195 void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
4196   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
4197 }
4198 
4199 void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadOneRegister(
4200     HInstruction* instruction,
4201     Location out,
4202     uint32_t offset,
4203     Location maybe_temp,
4204     ReadBarrierOption read_barrier_option) {
4205   GpuRegister out_reg = out.AsRegister<GpuRegister>();
4206   if (read_barrier_option == kWithReadBarrier) {
4207     CHECK(kEmitCompilerReadBarrier);
4208     DCHECK(maybe_temp.IsRegister()) << maybe_temp;
4209     if (kUseBakerReadBarrier) {
4210       // Load with fast path based Baker's read barrier.
4211       // /* HeapReference<Object> */ out = *(out + offset)
4212       codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
4213                                                       out,
4214                                                       out_reg,
4215                                                       offset,
4216                                                       maybe_temp,
4217                                                       /* needs_null_check */ false);
4218     } else {
4219       // Load with slow path based read barrier.
4220       // Save the value of `out` into `maybe_temp` before overwriting it
4221       // in the following move operation, as we will need it for the
4222       // read barrier below.
4223       __ Move(maybe_temp.AsRegister<GpuRegister>(), out_reg);
4224       // /* HeapReference<Object> */ out = *(out + offset)
4225       __ LoadFromOffset(kLoadUnsignedWord, out_reg, out_reg, offset);
4226       codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset);
4227     }
4228   } else {
4229     // Plain load with no read barrier.
4230     // /* HeapReference<Object> */ out = *(out + offset)
4231     __ LoadFromOffset(kLoadUnsignedWord, out_reg, out_reg, offset);
4232     __ MaybeUnpoisonHeapReference(out_reg);
4233   }
4234 }
4235 
4236 void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadTwoRegisters(
4237     HInstruction* instruction,
4238     Location out,
4239     Location obj,
4240     uint32_t offset,
4241     Location maybe_temp,
4242     ReadBarrierOption read_barrier_option) {
4243   GpuRegister out_reg = out.AsRegister<GpuRegister>();
4244   GpuRegister obj_reg = obj.AsRegister<GpuRegister>();
4245   if (read_barrier_option == kWithReadBarrier) {
4246     CHECK(kEmitCompilerReadBarrier);
4247     if (kUseBakerReadBarrier) {
4248       DCHECK(maybe_temp.IsRegister()) << maybe_temp;
4249       // Load with fast path based Baker's read barrier.
4250       // /* HeapReference<Object> */ out = *(obj + offset)
4251       codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
4252                                                       out,
4253                                                       obj_reg,
4254                                                       offset,
4255                                                       maybe_temp,
4256                                                       /* needs_null_check */ false);
4257     } else {
4258       // Load with slow path based read barrier.
4259       // /* HeapReference<Object> */ out = *(obj + offset)
4260       __ LoadFromOffset(kLoadUnsignedWord, out_reg, obj_reg, offset);
4261       codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
4262     }
4263   } else {
4264     // Plain load with no read barrier.
4265     // /* HeapReference<Object> */ out = *(obj + offset)
4266     __ LoadFromOffset(kLoadUnsignedWord, out_reg, obj_reg, offset);
4267     __ MaybeUnpoisonHeapReference(out_reg);
4268   }
4269 }
4270 
4271 void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(
4272     HInstruction* instruction,
4273     Location root,
4274     GpuRegister obj,
4275     uint32_t offset,
4276     ReadBarrierOption read_barrier_option) {
4277   GpuRegister root_reg = root.AsRegister<GpuRegister>();
4278   if (read_barrier_option == kWithReadBarrier) {
4279     DCHECK(kEmitCompilerReadBarrier);
4280     if (kUseBakerReadBarrier) {
4281       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
4282       // Baker's read barriers are used:
4283       //
4284       //   root = obj.field;
4285       //   temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
4286       //   if (temp != null) {
4287       //     root = temp(root)
4288       //   }
4289 
4290       // /* GcRoot<mirror::Object> */ root = *(obj + offset)
4291       __ LoadFromOffset(kLoadUnsignedWord, root_reg, obj, offset);
4292       static_assert(
4293           sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
4294           "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
4295           "have different sizes.");
4296       static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
4297                     "art::mirror::CompressedReference<mirror::Object> and int32_t "
4298                     "have different sizes.");
4299 
4300       // Slow path marking the GC root `root`.
4301       Location temp = Location::RegisterLocation(T9);
4302       SlowPathCodeMIPS64* slow_path =
4303           new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(
4304               instruction,
4305               root,
4306               /*entrypoint*/ temp);
4307       codegen_->AddSlowPath(slow_path);
4308 
4309       // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
4310       const int32_t entry_point_offset =
4311           CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(root.reg() - 1);
4312       // Loading the entrypoint does not require a load acquire since it is only changed when
4313       // threads are suspended or running a checkpoint.
4314       __ LoadFromOffset(kLoadDoubleword, temp.AsRegister<GpuRegister>(), TR, entry_point_offset);
4315       // The entrypoint is null when the GC is not marking, this prevents one load compared to
4316       // checking GetIsGcMarking.
4317       __ Bnezc(temp.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
4318       __ Bind(slow_path->GetExitLabel());
4319     } else {
4320       // GC root loaded through a slow path for read barriers other
4321       // than Baker's.
4322       // /* GcRoot<mirror::Object>* */ root = obj + offset
4323       __ Daddiu64(root_reg, obj, static_cast<int32_t>(offset));
4324       // /* mirror::Object* */ root = root->Read()
4325       codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
4326     }
4327   } else {
4328     // Plain GC root load with no read barrier.
4329     // /* GcRoot<mirror::Object> */ root = *(obj + offset)
4330     __ LoadFromOffset(kLoadUnsignedWord, root_reg, obj, offset);
4331     // Note that GC roots are not affected by heap poisoning, thus we
4332     // do not have to unpoison `root_reg` here.
4333   }
4334 }
4335 
4336 void CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
4337                                                                 Location ref,
4338                                                                 GpuRegister obj,
4339                                                                 uint32_t offset,
4340                                                                 Location temp,
4341                                                                 bool needs_null_check) {
4342   DCHECK(kEmitCompilerReadBarrier);
4343   DCHECK(kUseBakerReadBarrier);
4344 
4345   // /* HeapReference<Object> */ ref = *(obj + offset)
4346   Location no_index = Location::NoLocation();
4347   ScaleFactor no_scale_factor = TIMES_1;
4348   GenerateReferenceLoadWithBakerReadBarrier(instruction,
4349                                             ref,
4350                                             obj,
4351                                             offset,
4352                                             no_index,
4353                                             no_scale_factor,
4354                                             temp,
4355                                             needs_null_check);
4356 }
4357 
4358 void CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
4359                                                                 Location ref,
4360                                                                 GpuRegister obj,
4361                                                                 uint32_t data_offset,
4362                                                                 Location index,
4363                                                                 Location temp,
4364                                                                 bool needs_null_check) {
4365   DCHECK(kEmitCompilerReadBarrier);
4366   DCHECK(kUseBakerReadBarrier);
4367 
4368   static_assert(
4369       sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
4370       "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
4371   // /* HeapReference<Object> */ ref =
4372   //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
4373   ScaleFactor scale_factor = TIMES_4;
4374   GenerateReferenceLoadWithBakerReadBarrier(instruction,
4375                                             ref,
4376                                             obj,
4377                                             data_offset,
4378                                             index,
4379                                             scale_factor,
4380                                             temp,
4381                                             needs_null_check);
4382 }
4383 
4384 void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
4385                                                                     Location ref,
4386                                                                     GpuRegister obj,
4387                                                                     uint32_t offset,
4388                                                                     Location index,
4389                                                                     ScaleFactor scale_factor,
4390                                                                     Location temp,
4391                                                                     bool needs_null_check,
4392                                                                     bool always_update_field) {
4393   DCHECK(kEmitCompilerReadBarrier);
4394   DCHECK(kUseBakerReadBarrier);
4395 
4396   // In slow path based read barriers, the read barrier call is
4397   // inserted after the original load. However, in fast path based
4398   // Baker's read barriers, we need to perform the load of
4399   // mirror::Object::monitor_ *before* the original reference load.
4400   // This load-load ordering is required by the read barrier.
4401   // The fast path/slow path (for Baker's algorithm) should look like:
4402   //
4403   //   uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
4404   //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
4405   //   HeapReference<Object> ref = *src;  // Original reference load.
4406   //   bool is_gray = (rb_state == ReadBarrier::GrayState());
4407   //   if (is_gray) {
4408   //     ref = ReadBarrier::Mark(ref);  // Performed by runtime entrypoint slow path.
4409   //   }
4410   //
4411   // Note: the original implementation in ReadBarrier::Barrier is
4412   // slightly more complex as it performs additional checks that we do
4413   // not do here for performance reasons.
4414 
4415   GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
4416   GpuRegister temp_reg = temp.AsRegister<GpuRegister>();
4417   uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
4418 
4419   // /* int32_t */ monitor = obj->monitor_
4420   __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
4421   if (needs_null_check) {
4422     MaybeRecordImplicitNullCheck(instruction);
4423   }
4424   // /* LockWord */ lock_word = LockWord(monitor)
4425   static_assert(sizeof(LockWord) == sizeof(int32_t),
4426                 "art::LockWord and int32_t have different sizes.");
4427 
4428   __ Sync(0);  // Barrier to prevent load-load reordering.
4429 
4430   // The actual reference load.
4431   if (index.IsValid()) {
4432     // Load types involving an "index": ArrayGet,
4433     // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
4434     // intrinsics.
4435     // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
4436     if (index.IsConstant()) {
4437       size_t computed_offset =
4438           (index.GetConstant()->AsIntConstant()->GetValue() << scale_factor) + offset;
4439       __ LoadFromOffset(kLoadUnsignedWord, ref_reg, obj, computed_offset);
4440     } else {
4441       GpuRegister index_reg = index.AsRegister<GpuRegister>();
4442       if (scale_factor == TIMES_1) {
4443         __ Daddu(TMP, index_reg, obj);
4444       } else {
4445         __ Dlsa(TMP, index_reg, obj, scale_factor);
4446       }
4447       __ LoadFromOffset(kLoadUnsignedWord, ref_reg, TMP, offset);
4448     }
4449   } else {
4450     // /* HeapReference<Object> */ ref = *(obj + offset)
4451     __ LoadFromOffset(kLoadUnsignedWord, ref_reg, obj, offset);
4452   }
4453 
4454   // Object* ref = ref_addr->AsMirrorPtr()
4455   __ MaybeUnpoisonHeapReference(ref_reg);
4456 
4457   // Slow path marking the object `ref` when it is gray.
4458   SlowPathCodeMIPS64* slow_path;
4459   if (always_update_field) {
4460     // ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 only supports address
4461     // of the form `obj + field_offset`, where `obj` is a register and
4462     // `field_offset` is a register. Thus `offset` and `scale_factor`
4463     // above are expected to be null in this code path.
4464     DCHECK_EQ(offset, 0u);
4465     DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
4466     slow_path = new (GetGraph()->GetArena())
4467         ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
4468                                                     ref,
4469                                                     obj,
4470                                                     /* field_offset */ index,
4471                                                     temp_reg);
4472   } else {
4473     slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
4474   }
4475   AddSlowPath(slow_path);
4476 
4477   // if (rb_state == ReadBarrier::GrayState())
4478   //   ref = ReadBarrier::Mark(ref);
4479   // Given the numeric representation, it's enough to check the low bit of the
4480   // rb_state. We do that by shifting the bit into the sign bit (31) and
4481   // performing a branch on less than zero.
4482   static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
4483   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
4484   static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
4485   __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
4486   __ Bltzc(temp_reg, slow_path->GetEntryLabel());
4487   __ Bind(slow_path->GetExitLabel());
4488 }
4489 
4490 void CodeGeneratorMIPS64::GenerateReadBarrierSlow(HInstruction* instruction,
4491                                                   Location out,
4492                                                   Location ref,
4493                                                   Location obj,
4494                                                   uint32_t offset,
4495                                                   Location index) {
4496   DCHECK(kEmitCompilerReadBarrier);
4497 
4498   // Insert a slow path based read barrier *after* the reference load.
4499   //
4500   // If heap poisoning is enabled, the unpoisoning of the loaded
4501   // reference will be carried out by the runtime within the slow
4502   // path.
4503   //
4504   // Note that `ref` currently does not get unpoisoned (when heap
4505   // poisoning is enabled), which is alright as the `ref` argument is
4506   // not used by the artReadBarrierSlow entry point.
4507   //
4508   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
4509   SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
4510       ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
4511   AddSlowPath(slow_path);
4512 
4513   __ Bc(slow_path->GetEntryLabel());
4514   __ Bind(slow_path->GetExitLabel());
4515 }
4516 
4517 void CodeGeneratorMIPS64::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
4518                                                        Location out,
4519                                                        Location ref,
4520                                                        Location obj,
4521                                                        uint32_t offset,
4522                                                        Location index) {
4523   if (kEmitCompilerReadBarrier) {
4524     // Baker's read barriers shall be handled by the fast path
4525     // (CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier).
4526     DCHECK(!kUseBakerReadBarrier);
4527     // If heap poisoning is enabled, unpoisoning will be taken care of
4528     // by the runtime within the slow path.
4529     GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
4530   } else if (kPoisonHeapReferences) {
4531     __ UnpoisonHeapReference(out.AsRegister<GpuRegister>());
4532   }
4533 }
4534 
4535 void CodeGeneratorMIPS64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
4536                                                          Location out,
4537                                                          Location root) {
4538   DCHECK(kEmitCompilerReadBarrier);
4539 
4540   // Insert a slow path based read barrier *after* the GC root load.
4541   //
4542   // Note that GC roots are not affected by heap poisoning, so we do
4543   // not need to do anything special for this here.
4544   SlowPathCodeMIPS64* slow_path =
4545       new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
4546   AddSlowPath(slow_path);
4547 
4548   __ Bc(slow_path->GetEntryLabel());
4549   __ Bind(slow_path->GetExitLabel());
4550 }
4551 
4552 void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
4553   LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
4554   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
4555   switch (type_check_kind) {
4556     case TypeCheckKind::kExactCheck:
4557     case TypeCheckKind::kAbstractClassCheck:
4558     case TypeCheckKind::kClassHierarchyCheck:
4559     case TypeCheckKind::kArrayObjectCheck:
4560       call_kind =
4561           kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
4562       break;
4563     case TypeCheckKind::kArrayCheck:
4564     case TypeCheckKind::kUnresolvedCheck:
4565     case TypeCheckKind::kInterfaceCheck:
4566       call_kind = LocationSummary::kCallOnSlowPath;
4567       break;
4568   }
4569 
4570   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4571   locations->SetInAt(0, Location::RequiresRegister());
4572   locations->SetInAt(1, Location::RequiresRegister());
4573   // The output does overlap inputs.
4574   // Note that TypeCheckSlowPathMIPS64 uses this register too.
4575   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4576   locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
4577 }
4578 
4579 void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
4580   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
4581   LocationSummary* locations = instruction->GetLocations();
4582   Location obj_loc = locations->InAt(0);
4583   GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
4584   GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
4585   Location out_loc = locations->Out();
4586   GpuRegister out = out_loc.AsRegister<GpuRegister>();
4587   const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
4588   DCHECK_LE(num_temps, 1u);
4589   Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
4590   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
4591   uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
4592   uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
4593   uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
4594   Mips64Label done;
4595   SlowPathCodeMIPS64* slow_path = nullptr;
4596 
4597   // Return 0 if `obj` is null.
4598   // Avoid this check if we know `obj` is not null.
4599   if (instruction->MustDoNullCheck()) {
4600     __ Move(out, ZERO);
4601     __ Beqzc(obj, &done);
4602   }
4603 
4604   switch (type_check_kind) {
4605     case TypeCheckKind::kExactCheck: {
4606       // /* HeapReference<Class> */ out = obj->klass_
4607       GenerateReferenceLoadTwoRegisters(instruction,
4608                                         out_loc,
4609                                         obj_loc,
4610                                         class_offset,
4611                                         maybe_temp_loc,
4612                                         kCompilerReadBarrierOption);
4613       // Classes must be equal for the instanceof to succeed.
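      // Xor produces zero iff the two classes match; Sltiu(out, out, 1) then
      // materializes 1 for a match and 0 otherwise.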
4614       __ Xor(out, out, cls);
4615       __ Sltiu(out, out, 1);
4616       break;
4617     }
4618 
4619     case TypeCheckKind::kAbstractClassCheck: {
4620       // /* HeapReference<Class> */ out = obj->klass_
4621       GenerateReferenceLoadTwoRegisters(instruction,
4622                                         out_loc,
4623                                         obj_loc,
4624                                         class_offset,
4625                                         maybe_temp_loc,
4626                                         kCompilerReadBarrierOption);
4627       // If the class is abstract, we eagerly fetch the super class of the
4628       // object to avoid doing a comparison we know will fail.
4629       Mips64Label loop;
4630       __ Bind(&loop);
4631       // /* HeapReference<Class> */ out = out->super_class_
4632       GenerateReferenceLoadOneRegister(instruction,
4633                                        out_loc,
4634                                        super_offset,
4635                                        maybe_temp_loc,
4636                                        kCompilerReadBarrierOption);
4637       // If `out` is null, we use it for the result, and jump to `done`.
4638       __ Beqzc(out, &done);
4639       __ Bnec(out, cls, &loop);
4640       __ LoadConst32(out, 1);
4641       break;
4642     }
4643 
4644     case TypeCheckKind::kClassHierarchyCheck: {
4645       // /* HeapReference<Class> */ out = obj->klass_
4646       GenerateReferenceLoadTwoRegisters(instruction,
4647                                         out_loc,
4648                                         obj_loc,
4649                                         class_offset,
4650                                         maybe_temp_loc,
4651                                         kCompilerReadBarrierOption);
4652       // Walk over the class hierarchy to find a match.
4653       Mips64Label loop, success;
4654       __ Bind(&loop);
4655       __ Beqc(out, cls, &success);
4656       // /* HeapReference<Class> */ out = out->super_class_
4657       GenerateReferenceLoadOneRegister(instruction,
4658                                        out_loc,
4659                                        super_offset,
4660                                        maybe_temp_loc,
4661                                        kCompilerReadBarrierOption);
4662       __ Bnezc(out, &loop);
4663       // If `out` is null, we use it for the result, and jump to `done`.
4664       __ Bc(&done);
4665       __ Bind(&success);
4666       __ LoadConst32(out, 1);
4667       break;
4668     }
4669 
4670     case TypeCheckKind::kArrayObjectCheck: {
4671       // /* HeapReference<Class> */ out = obj->klass_
4672       GenerateReferenceLoadTwoRegisters(instruction,
4673                                         out_loc,
4674                                         obj_loc,
4675                                         class_offset,
4676                                         maybe_temp_loc,
4677                                         kCompilerReadBarrierOption);
4678       // Do an exact check.
4679       Mips64Label success;
4680       __ Beqc(out, cls, &success);
4681       // Otherwise, we need to check that the object's class is a non-primitive array.
4682       // /* HeapReference<Class> */ out = out->component_type_
4683       GenerateReferenceLoadOneRegister(instruction,
4684                                        out_loc,
4685                                        component_offset,
4686                                        maybe_temp_loc,
4687                                        kCompilerReadBarrierOption);
4688       // If `out` is null, we use it for the result, and jump to `done`.
4689       __ Beqzc(out, &done);
4690       __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
4691       static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
4692       __ Sltiu(out, out, 1);
4693       __ Bc(&done);
4694       __ Bind(&success);
4695       __ LoadConst32(out, 1);
4696       break;
4697     }
4698 
4699     case TypeCheckKind::kArrayCheck: {
4700       // No read barrier since the slow path will retry upon failure.
4701       // /* HeapReference<Class> */ out = obj->klass_
4702       GenerateReferenceLoadTwoRegisters(instruction,
4703                                         out_loc,
4704                                         obj_loc,
4705                                         class_offset,
4706                                         maybe_temp_loc,
4707                                         kWithoutReadBarrier);
4708       DCHECK(locations->OnlyCallsOnSlowPath());
4709       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
4710                                                                        /* is_fatal */ false);
4711       codegen_->AddSlowPath(slow_path);
4712       __ Bnec(out, cls, slow_path->GetEntryLabel());
4713       __ LoadConst32(out, 1);
4714       break;
4715     }
4716 
4717     case TypeCheckKind::kUnresolvedCheck:
4718     case TypeCheckKind::kInterfaceCheck: {
4719       // Note that we indeed only call on slow path, but we always go
4720       // into the slow path for the unresolved and interface check
4721       // cases.
4722       //
4723       // We cannot directly call the InstanceofNonTrivial runtime
4724       // entry point without resorting to a type checking slow path
4725       // here (i.e. by calling InvokeRuntime directly), as it would
4726       // require to assign fixed registers for the inputs of this
4727       // HInstanceOf instruction (following the runtime calling
4728       // convention), which might be cluttered by the potential first
4729       // read barrier emission at the beginning of this method.
4730       //
4731       // TODO: Introduce a new runtime entry point taking the object
4732       // to test (instead of its class) as argument, and let it deal
4733       // with the read barrier issues. This will let us refactor this
4734       // case of the `switch` code as it was previously (with a direct
4735       // call to the runtime not using a type checking slow path).
4736       // This should also be beneficial for the other cases above.
4737       DCHECK(locations->OnlyCallsOnSlowPath());
4738       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
4739                                                                        /* is_fatal */ false);
4740       codegen_->AddSlowPath(slow_path);
4741       __ Bc(slow_path->GetEntryLabel());
4742       break;
4743     }
4744   }
4745 
4746   __ Bind(&done);
4747 
4748   if (slow_path != nullptr) {
4749     __ Bind(slow_path->GetExitLabel());
4750   }
4751 }
4752 
4753 void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
4754   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
4755   locations->SetOut(Location::ConstantLocation(constant));
4756 }
4757 
4758 void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
4759   // Will be generated at use site.
4760 }
4761 
4762 void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
4763   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
4764   locations->SetOut(Location::ConstantLocation(constant));
4765 }
4766 
4767 void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
4768   // Will be generated at use site.
4769 }
4770 
4771 void LocationsBuilderMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
4772   // The trampoline uses the same calling convention as the dex calling convention,
4773   // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
4774   // the method_idx.
4775   HandleInvoke(invoke);
4776 }
4777 
4778 void InstructionCodeGeneratorMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
4779   codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
4780 }
4781 
4782 void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
4783   InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
4784   CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
4785 }
4786 
4787 void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
4788   HandleInvoke(invoke);
4789   // The register T0 is required to be used for the hidden argument in
4790   // art_quick_imt_conflict_trampoline, so add the hidden argument.
4791   invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
4792 }
4793 
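// Interface dispatch through the IMT: load the receiver's class, follow its
// ImTable pointer, index the table with this invoke's IMT slot and call the
// resolved ArtMethod's quick entry point. T0 (temp 1) carries the dex method
// index as the hidden argument expected on IMT conflicts.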
4794 void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
4795   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
4796   GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
4797   Location receiver = invoke->GetLocations()->InAt(0);
4798   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
4799   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
4800 
4801   // Set the hidden argument.
4802   __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
4803                  invoke->GetDexMethodIndex());
4804 
4805   // temp = object->GetClass();
4806   if (receiver.IsStackSlot()) {
4807     __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
4808     __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
4809   } else {
4810     __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
4811   }
4812   codegen_->MaybeRecordImplicitNullCheck(invoke);
4813   // Instead of simply (possibly) unpoisoning `temp` here, we should
4814   // emit a read barrier for the previous class reference load.
4815   // However this is not required in practice, as this is an
4816   // intermediate/temporary reference and because the current
4817   // concurrent copying collector keeps the from-space memory
4818   // intact/accessible until the end of the marking phase (though a
4819   // future version of the collector may not keep it accessible).
4820   __ MaybeUnpoisonHeapReference(temp);
4821   __ LoadFromOffset(kLoadDoubleword, temp, temp,
4822       mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
4823   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
4824       invoke->GetImtIndex(), kMips64PointerSize));
4825   // temp = temp->GetImtEntryAt(method_offset);
4826   __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
4827   // T9 = temp->GetEntryPoint();
4828   __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
4829   // T9();
4830   __ Jalr(T9);
4831   __ Nop();
4832   DCHECK(!codegen_->IsLeafMethod());
4833   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
4834 }
4835 
4836 void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
4837   IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
4838   if (intrinsic.TryDispatch(invoke)) {
4839     return;
4840   }
4841 
4842   HandleInvoke(invoke);
4843 }
4844 
4845 void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
4846   // Explicit clinit checks triggered by static invokes must have been pruned by
4847   // art::PrepareForRegisterAllocation.
4848   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
4849 
4850   IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
4851   if (intrinsic.TryDispatch(invoke)) {
4852     return;
4853   }
4854 
4855   HandleInvoke(invoke);
4856 }
4857 
4858 void LocationsBuilderMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
4859   HandleInvoke(invoke);
4860 }
4861 
4862 void InstructionCodeGeneratorMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
4863   codegen_->GenerateInvokePolymorphicCall(invoke);
4864 }
4865 
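// Emits the intrinsic implementation for `invoke` if its LocationSummary was marked as
// intrinsified by the locations builder; returns false so the caller can emit a regular call.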
4866 static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
4867   if (invoke->GetLocations()->Intrinsified()) {
4868     IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
4869     intrinsic.Dispatch(invoke);
4870     return true;
4871   }
4872   return false;
4873 }
4874 
4875 HLoadString::LoadKind CodeGeneratorMIPS64::GetSupportedLoadStringKind(
4876     HLoadString::LoadKind desired_string_load_kind) {
4877   bool fallback_load = false;
4878   switch (desired_string_load_kind) {
4879     case HLoadString::LoadKind::kBootImageLinkTimeAddress:
4880       DCHECK(!GetCompilerOptions().GetCompilePic());
4881       break;
4882     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
4883       DCHECK(GetCompilerOptions().GetCompilePic());
4884       break;
4885     case HLoadString::LoadKind::kBootImageAddress:
4886       break;
4887     case HLoadString::LoadKind::kBssEntry:
4888       DCHECK(!Runtime::Current()->UseJitCompilation());
4889       break;
4890     case HLoadString::LoadKind::kDexCacheViaMethod:
4891       break;
4892     case HLoadString::LoadKind::kJitTableAddress:
4893       DCHECK(Runtime::Current()->UseJitCompilation());
4894       break;
4895   }
4896   if (fallback_load) {
4897     desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
4898   }
4899   return desired_string_load_kind;
4900 }
4901 
4902 HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
4903     HLoadClass::LoadKind desired_class_load_kind) {
4904   bool fallback_load = false;
4905   switch (desired_class_load_kind) {
4906     case HLoadClass::LoadKind::kInvalid:
4907       LOG(FATAL) << "UNREACHABLE";
4908       UNREACHABLE();
4909     case HLoadClass::LoadKind::kReferrersClass:
4910       break;
4911     case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
4912       DCHECK(!GetCompilerOptions().GetCompilePic());
4913       break;
4914     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
4915       DCHECK(GetCompilerOptions().GetCompilePic());
4916       break;
4917     case HLoadClass::LoadKind::kBootImageAddress:
4918       break;
4919     case HLoadClass::LoadKind::kBssEntry:
4920       DCHECK(!Runtime::Current()->UseJitCompilation());
4921       break;
4922     case HLoadClass::LoadKind::kJitTableAddress:
4923       DCHECK(Runtime::Current()->UseJitCompilation());
4924       break;
4925     case HLoadClass::LoadKind::kDexCacheViaMethod:
4926       break;
4927   }
4928   if (fallback_load) {
4929     desired_class_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
4930   }
4931   return desired_class_load_kind;
4932 }
4933 
4934 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
4935       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
4936       HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
4937   // On MIPS64 we support all dispatch types.
4938   return desired_dispatch_info;
4939 }
4940 
4941 void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
4942   // All registers are assumed to be correctly set up per the calling convention.
4943   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
4944   HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
4945   HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
4946 
4947   switch (method_load_kind) {
4948     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
4949       // temp = thread->string_init_entrypoint
4950       uint32_t offset =
4951           GetThreadOffset<kMips64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
4952       __ LoadFromOffset(kLoadDoubleword,
4953                         temp.AsRegister<GpuRegister>(),
4954                         TR,
4955                         offset);
4956       break;
4957     }
4958     case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
4959       callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
4960       break;
4961     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
4962       __ LoadLiteral(temp.AsRegister<GpuRegister>(),
4963                      kLoadDoubleword,
4964                      DeduplicateUint64Literal(invoke->GetMethodAddress()));
4965       break;
4966     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
4967       uint32_t offset = invoke->GetDexCacheArrayOffset();
4968       CodeGeneratorMIPS64::PcRelativePatchInfo* info =
4969           NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset);
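      // The immediates emitted below are placeholders (e.g. 0x5678); they are fixed up later,
      // via the recorded PcRelativePatchInfo, with the PC-relative address of the dex cache
      // array entry.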
4970       EmitPcRelativeAddressPlaceholderHigh(info, AT);
4971       __ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
4972       break;
4973     }
4974     case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
4975       Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
4976       GpuRegister reg = temp.AsRegister<GpuRegister>();
4977       GpuRegister method_reg;
4978       if (current_method.IsRegister()) {
4979         method_reg = current_method.AsRegister<GpuRegister>();
4980       } else {
4981         // TODO: use the appropriate DCHECK() here if possible.
4982         // DCHECK(invoke->GetLocations()->Intrinsified());
4983         DCHECK(!current_method.IsValid());
4984         method_reg = reg;
4985         __ Ld(reg, SP, kCurrentMethodStackOffset);
4986       }
4987 
4988       // temp = temp->dex_cache_resolved_methods_;
4989       __ LoadFromOffset(kLoadDoubleword,
4990                         reg,
4991                         method_reg,
4992                         ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
4993       // temp = temp[index_in_cache];
4994       // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
4995       uint32_t index_in_cache = invoke->GetDexMethodIndex();
4996       __ LoadFromOffset(kLoadDoubleword,
4997                         reg,
4998                         reg,
4999                         CodeGenerator::GetCachePointerOffset(index_in_cache));
5000       break;
5001     }
5002   }
5003 
5004   switch (code_ptr_location) {
5005     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
5006       __ Balc(&frame_entry_label_);
5007       break;
5008     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
5009       // T9 = callee_method->entry_point_from_quick_compiled_code_;
5010       __ LoadFromOffset(kLoadDoubleword,
5011                         T9,
5012                         callee_method.AsRegister<GpuRegister>(),
5013                         ArtMethod::EntryPointFromQuickCompiledCodeOffset(
5014                             kMips64PointerSize).Int32Value());
5015       // T9()
5016       __ Jalr(T9);
5017       __ Nop();
5018       break;
5019   }
5020   DCHECK(!IsLeafMethod());
5021 }
5022 
5023 void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
5024   // Explicit clinit checks triggered by static invokes must have been pruned by
5025   // art::PrepareForRegisterAllocation.
5026   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
5027 
5028   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
5029     return;
5030   }
5031 
5032   LocationSummary* locations = invoke->GetLocations();
5033   codegen_->GenerateStaticOrDirectCall(invoke,
5034                                        locations->HasTemps()
5035                                            ? locations->GetTemp(0)
5036                                            : Location::NoLocation());
5037   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
5038 }
5039 
5040 void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
5041   // Use the calling convention instead of the location of the receiver, as
5042   // intrinsics may have put the receiver in a different register. In the intrinsics
5043   // slow path, the arguments have been moved to the right place, so here we are
5044   // guaranteed that the receiver is the first register of the calling convention.
5045   InvokeDexCallingConvention calling_convention;
5046   GpuRegister receiver = calling_convention.GetRegisterAt(0);
5047 
5048   GpuRegister temp = temp_location.AsRegister<GpuRegister>();
5049   size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
5050       invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
5051   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
5052   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
5053 
5054   // temp = object->GetClass();
5055   __ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset);
5056   MaybeRecordImplicitNullCheck(invoke);
5057   // Instead of simply (possibly) unpoisoning `temp` here, we should
5058   // emit a read barrier for the previous class reference load.
5059   // However this is not required in practice, as this is an
5060   // intermediate/temporary reference and because the current
5061   // concurrent copying collector keeps the from-space memory
5062   // intact/accessible until the end of the marking phase (though a
5063   // future version of the collector may not keep it accessible).
5064   __ MaybeUnpoisonHeapReference(temp);
5065   // temp = temp->GetMethodAt(method_offset);
5066   __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
5067   // T9 = temp->GetEntryPoint();
5068   __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
5069   // T9();
5070   __ Jalr(T9);
5071   __ Nop();
5072 }
5073 
5074 void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
5075   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
5076     return;
5077   }
5078 
5079   codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
5080   DCHECK(!codegen_->IsLeafMethod());
5081   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
5082 }
5083 
5084 void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
5085   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
5086   if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
5087     InvokeRuntimeCallingConvention calling_convention;
5088     CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
5089         cls,
5090         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
5091         calling_convention.GetReturnLocation(Primitive::kPrimNot));
5092     return;
5093   }
5094   DCHECK(!cls->NeedsAccessCheck());
5095 
5096   const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
5097   LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
5098       ? LocationSummary::kCallOnSlowPath
5099       : LocationSummary::kNoCall;
5100   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
5101   if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
5102     locations->SetInAt(0, Location::RequiresRegister());
5103   }
5104   locations->SetOut(Location::RequiresRegister());
5105 }
5106 
5107 // NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
5108 // move.
5109 void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
5110   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
5111   if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
5112     codegen_->GenerateLoadClassRuntimeCall(cls);
5113     return;
5114   }
5115   DCHECK(!cls->NeedsAccessCheck());
5116 
5117   LocationSummary* locations = cls->GetLocations();
5118   Location out_loc = locations->Out();
5119   GpuRegister out = out_loc.AsRegister<GpuRegister>();
5120   GpuRegister current_method_reg = ZERO;
5121   if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
5122       load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
5123       current_method_reg = locations->InAt(0).AsRegister<GpuRegister>();
5124   }
5125 
5126   const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
5127       ? kWithoutReadBarrier
5128       : kCompilerReadBarrierOption;
5129   bool generate_null_check = false;
5130   switch (load_kind) {
5131     case HLoadClass::LoadKind::kReferrersClass:
5132       DCHECK(!cls->CanCallRuntime());
5133       DCHECK(!cls->MustGenerateClinitCheck());
5134       // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
5135       GenerateGcRootFieldLoad(cls,
5136                               out_loc,
5137                               current_method_reg,
5138                               ArtMethod::DeclaringClassOffset().Int32Value(),
5139                               read_barrier_option);
5140       break;
5141     case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
5142       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
5143       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
5144       __ LoadLiteral(out,
5145                      kLoadUnsignedWord,
5146                      codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
5147                                                                cls->GetTypeIndex()));
5148       break;
5149     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
5150       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
5151       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
5152       CodeGeneratorMIPS64::PcRelativePatchInfo* info =
5153           codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
5154       codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
5155       __ Daddiu(out, AT, /* placeholder */ 0x5678);
5156       break;
5157     }
5158     case HLoadClass::LoadKind::kBootImageAddress: {
5159       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
5160       uint32_t address = dchecked_integral_cast<uint32_t>(
5161           reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
5162       DCHECK_NE(address, 0u);
5163       __ LoadLiteral(out,
5164                      kLoadUnsignedWord,
5165                      codegen_->DeduplicateBootImageAddressLiteral(address));
5166       break;
5167     }
5168     case HLoadClass::LoadKind::kBssEntry: {
5169       CodeGeneratorMIPS64::PcRelativePatchInfo* info =
5170           codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
5171       codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out);
5172       GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678, read_barrier_option);
5173       generate_null_check = true;
5174       break;
5175     }
5176     case HLoadClass::LoadKind::kJitTableAddress:
5177       __ LoadLiteral(out,
5178                      kLoadUnsignedWord,
5179                      codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
5180                                                           cls->GetTypeIndex(),
5181                                                           cls->GetClass()));
5182       GenerateGcRootFieldLoad(cls, out_loc, out, 0, read_barrier_option);
5183       break;
5184     case HLoadClass::LoadKind::kDexCacheViaMethod:
5185     case HLoadClass::LoadKind::kInvalid:
5186       LOG(FATAL) << "UNREACHABLE";
5187       UNREACHABLE();
5188   }
5189 
5190   if (generate_null_check || cls->MustGenerateClinitCheck()) {
5191     DCHECK(cls->CanCallRuntime());
5192     SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
5193         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
5194     codegen_->AddSlowPath(slow_path);
5195     if (generate_null_check) {
5196       __ Beqzc(out, slow_path->GetEntryLabel());
5197     }
5198     if (cls->MustGenerateClinitCheck()) {
5199       GenerateClassInitializationCheck(slow_path, out);
5200     } else {
5201       __ Bind(slow_path->GetExitLabel());
5202     }
5203   }
5204 }
5205 
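// Byte offset of the pending-exception slot in the Thread object, addressed via the thread
// register (TR) below.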
5206 static int32_t GetExceptionTlsOffset() {
5207   return Thread::ExceptionOffset<kMips64PointerSize>().Int32Value();
5208 }
5209 
5210 void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
5211   LocationSummary* locations =
5212       new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
5213   locations->SetOut(Location::RequiresRegister());
5214 }
5215 
5216 void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
5217   GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
5218   __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
5219 }
5220 
5221 void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
5222   new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
5223 }
5224 
5225 void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
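  // Clear the pending exception by storing null (the ZERO register) into the thread-local
  // exception slot; the reference is a 32-bit heap reference, hence the word-sized store.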
5226   __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
5227 }
5228 
5229 void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
5230   HLoadString::LoadKind load_kind = load->GetLoadKind();
5231   LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
5232   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
5233   if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
5234     InvokeRuntimeCallingConvention calling_convention;
5235     locations->SetOut(calling_convention.GetReturnLocation(load->GetType()));
5236   } else {
5237     locations->SetOut(Location::RequiresRegister());
5238   }
5239 }
5240 
5241 // NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
5242 // move.
5243 void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
5244   HLoadString::LoadKind load_kind = load->GetLoadKind();
5245   LocationSummary* locations = load->GetLocations();
5246   Location out_loc = locations->Out();
5247   GpuRegister out = out_loc.AsRegister<GpuRegister>();
5248 
5249   switch (load_kind) {
5250     case HLoadString::LoadKind::kBootImageLinkTimeAddress:
5251       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
5252       __ LoadLiteral(out,
5253                      kLoadUnsignedWord,
5254                      codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
5255                                                                  load->GetStringIndex()));
5256       return;  // No dex cache slow path.
5257     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
5258       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
5259       CodeGeneratorMIPS64::PcRelativePatchInfo* info =
5260           codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
5261       codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
5262       __ Daddiu(out, AT, /* placeholder */ 0x5678);
5263       return;  // No dex cache slow path.
5264     }
5265     case HLoadString::LoadKind::kBootImageAddress: {
5266       uint32_t address = dchecked_integral_cast<uint32_t>(
5267           reinterpret_cast<uintptr_t>(load->GetString().Get()));
5268       DCHECK_NE(address, 0u);
5269       __ LoadLiteral(out,
5270                      kLoadUnsignedWord,
5271                      codegen_->DeduplicateBootImageAddressLiteral(address));
5272       return;  // No dex cache slow path.
5273     }
5274     case HLoadString::LoadKind::kBssEntry: {
5275       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
5276       CodeGeneratorMIPS64::PcRelativePatchInfo* info =
5277           codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
5278       codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out);
5279       GenerateGcRootFieldLoad(load,
5280                               out_loc,
5281                               out,
5282                               /* placeholder */ 0x5678,
5283                               kCompilerReadBarrierOption);
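      // If the .bss slot has not been resolved yet, the loaded root is null, so branch to the
      // slow path below, which calls the runtime to resolve and cache the String.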
5284       SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
5285       codegen_->AddSlowPath(slow_path);
5286       __ Beqzc(out, slow_path->GetEntryLabel());
5287       __ Bind(slow_path->GetExitLabel());
5288       return;
5289     }
5290     case HLoadString::LoadKind::kJitTableAddress:
5291       __ LoadLiteral(out,
5292                      kLoadUnsignedWord,
5293                      codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
5294                                                            load->GetStringIndex(),
5295                                                            load->GetString()));
5296       GenerateGcRootFieldLoad(load, out_loc, out, 0, kCompilerReadBarrierOption);
5297       return;
5298     default:
5299       break;
5300   }
5301 
5302   // TODO: Re-add the compiler code to do string dex cache lookup again.
5303   DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
5304   InvokeRuntimeCallingConvention calling_convention;
5305   __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
5306   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
5307   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
5308 }
5309 
5310 void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
5311   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
5312   locations->SetOut(Location::ConstantLocation(constant));
5313 }
5314 
5315 void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
5316   // Will be generated at use site.
5317 }
5318 
5319 void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
5320   LocationSummary* locations =
5321       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
5322   InvokeRuntimeCallingConvention calling_convention;
5323   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
5324 }
5325 
5326 void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
5327   codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject : kQuickUnlockObject,
5328                           instruction,
5329                           instruction->GetDexPc());
5330   if (instruction->IsEnter()) {
5331     CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
5332   } else {
5333     CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
5334   }
5335 }
5336 
5337 void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
5338   LocationSummary* locations =
5339       new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
5340   switch (mul->GetResultType()) {
5341     case Primitive::kPrimInt:
5342     case Primitive::kPrimLong:
5343       locations->SetInAt(0, Location::RequiresRegister());
5344       locations->SetInAt(1, Location::RequiresRegister());
5345       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
5346       break;
5347 
5348     case Primitive::kPrimFloat:
5349     case Primitive::kPrimDouble:
5350       locations->SetInAt(0, Location::RequiresFpuRegister());
5351       locations->SetInAt(1, Location::RequiresFpuRegister());
5352       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
5353       break;
5354 
5355     default:
5356       LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
5357   }
5358 }
5359 
5360 void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
5361   Primitive::Type type = instruction->GetType();
5362   LocationSummary* locations = instruction->GetLocations();
5363 
5364   switch (type) {
5365     case Primitive::kPrimInt:
5366     case Primitive::kPrimLong: {
5367       GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
5368       GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
5369       GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
5370       if (type == Primitive::kPrimInt)
5371         __ MulR6(dst, lhs, rhs);
5372       else
5373         __ Dmul(dst, lhs, rhs);
5374       break;
5375     }
5376     case Primitive::kPrimFloat:
5377     case Primitive::kPrimDouble: {
5378       FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
5379       FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
5380       FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
5381       if (type == Primitive::kPrimFloat)
5382         __ MulS(dst, lhs, rhs);
5383       else
5384         __ MulD(dst, lhs, rhs);
5385       break;
5386     }
5387     default:
5388       LOG(FATAL) << "Unexpected mul type " << type;
5389   }
5390 }
5391 
5392 void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
5393   LocationSummary* locations =
5394       new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
5395   switch (neg->GetResultType()) {
5396     case Primitive::kPrimInt:
5397     case Primitive::kPrimLong:
5398       locations->SetInAt(0, Location::RequiresRegister());
5399       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
5400       break;
5401 
5402     case Primitive::kPrimFloat:
5403     case Primitive::kPrimDouble:
5404       locations->SetInAt(0, Location::RequiresFpuRegister());
5405       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
5406       break;
5407 
5408     default:
5409       LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
5410   }
5411 }
5412 
5413 void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
5414   Primitive::Type type = instruction->GetType();
5415   LocationSummary* locations = instruction->GetLocations();
5416 
5417   switch (type) {
5418     case Primitive::kPrimInt:
5419     case Primitive::kPrimLong: {
5420       GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
5421       GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
5422       if (type == Primitive::kPrimInt)
5423         __ Subu(dst, ZERO, src);
5424       else
5425         __ Dsubu(dst, ZERO, src);
5426       break;
5427     }
5428     case Primitive::kPrimFloat:
5429     case Primitive::kPrimDouble: {
5430       FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
5431       FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
5432       if (type == Primitive::kPrimFloat)
5433         __ NegS(dst, src);
5434       else
5435         __ NegD(dst, src);
5436       break;
5437     }
5438     default:
5439       LOG(FATAL) << "Unexpected neg type " << type;
5440   }
5441 }
5442 
5443 void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
5444   LocationSummary* locations =
5445       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
5446   InvokeRuntimeCallingConvention calling_convention;
5447   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
5448   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
5449   locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
5450 }
5451 
5452 void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
5453   // Note: if heap poisoning is enabled, the entry point takes care
5454   // of poisoning the reference.
5455   codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
5456   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
5457 }
5458 
5459 void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
5460   LocationSummary* locations =
5461       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
5462   InvokeRuntimeCallingConvention calling_convention;
5463   if (instruction->IsStringAlloc()) {
5464     locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
5465   } else {
5466     locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
5467   }
5468   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
5469 }
5470 
5471 void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
5472   // Note: if heap poisoning is enabled, the entry point takes care
5473   // of poisoning the reference.
5474   if (instruction->IsStringAlloc()) {
5475     // String is allocated through StringFactory. Call NewEmptyString entry point.
5476     GpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
5477     MemberOffset code_offset =
5478         ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
5479     __ LoadFromOffset(kLoadDoubleword, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
5480     __ LoadFromOffset(kLoadDoubleword, T9, temp, code_offset.Int32Value());
5481     __ Jalr(T9);
5482     __ Nop();
5483     codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
5484   } else {
5485     codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
5486     CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
5487   }
5488 }
5489 
5490 void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
5491   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
5492   locations->SetInAt(0, Location::RequiresRegister());
5493   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
5494 }
5495 
5496 void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
5497   Primitive::Type type = instruction->GetType();
5498   LocationSummary* locations = instruction->GetLocations();
5499 
5500   switch (type) {
5501     case Primitive::kPrimInt:
5502     case Primitive::kPrimLong: {
5503       GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
5504       GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
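      // nor(src, ZERO) computes ~src, i.e. a bitwise not, for both int and long inputs.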
5505       __ Nor(dst, src, ZERO);
5506       break;
5507     }
5508 
5509     default:
5510       LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
5511   }
5512 }
5513 
5514 void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
5515   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
5516   locations->SetInAt(0, Location::RequiresRegister());
5517   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
5518 }
5519 
5520 void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
5521   LocationSummary* locations = instruction->GetLocations();
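  // Boolean inputs are 0 or 1, so flipping the low bit implements logical negation.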
5522   __ Xori(locations->Out().AsRegister<GpuRegister>(),
5523           locations->InAt(0).AsRegister<GpuRegister>(),
5524           1);
5525 }
5526 
5527 void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
5528   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
5529   locations->SetInAt(0, Location::RequiresRegister());
5530 }
5531 
5532 void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
5533   if (CanMoveNullCheckToUser(instruction)) {
5534     return;
5535   }
5536   Location obj = instruction->GetLocations()->InAt(0);
5537 
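  // Emit a load through the object; if the reference is null the load faults and the fault
  // handler throws the NullPointerException, so no explicit branch is needed here.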
5538   __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
5539   RecordPcInfo(instruction, instruction->GetDexPc());
5540 }
5541 
5542 void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
5543   SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
5544   AddSlowPath(slow_path);
5545 
5546   Location obj = instruction->GetLocations()->InAt(0);
5547 
5548   __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
5549 }
5550 
5551 void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
5552   codegen_->GenerateNullCheck(instruction);
5553 }
5554 
5555 void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
5556   HandleBinaryOp(instruction);
5557 }
5558 
5559 void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
5560   HandleBinaryOp(instruction);
5561 }
5562 
5563 void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
5564   LOG(FATAL) << "Unreachable";
5565 }
5566 
5567 void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
5568   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
5569 }
5570 
5571 void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
5572   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
5573   Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
5574   if (location.IsStackSlot()) {
5575     location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
5576   } else if (location.IsDoubleStackSlot()) {
5577     location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
5578   }
5579   locations->SetOut(location);
5580 }
5581 
5582 void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
5583                                                          ATTRIBUTE_UNUSED) {
5584   // Nothing to do, the parameter is already at its location.
5585 }
5586 
5587 void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
5588   LocationSummary* locations =
5589       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
5590   locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
5591 }
5592 
5593 void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
5594                                                         ATTRIBUTE_UNUSED) {
5595   // Nothing to do, the method is already at its location.
5596 }
5597 
5598 void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
5599   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
5600   for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
5601     locations->SetInAt(i, Location::Any());
5602   }
5603   locations->SetOut(Location::Any());
5604 }
5605 
5606 void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
5607   LOG(FATAL) << "Unreachable";
5608 }
5609 
5610 void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
5611   Primitive::Type type = rem->GetResultType();
5612   LocationSummary::CallKind call_kind =
5613       Primitive::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly
5614                                            : LocationSummary::kNoCall;
5615   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
5616 
5617   switch (type) {
5618     case Primitive::kPrimInt:
5619     case Primitive::kPrimLong:
5620       locations->SetInAt(0, Location::RequiresRegister());
5621       locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
5622       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
5623       break;
5624 
5625     case Primitive::kPrimFloat:
5626     case Primitive::kPrimDouble: {
5627       InvokeRuntimeCallingConvention calling_convention;
5628       locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
5629       locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
5630       locations->SetOut(calling_convention.GetReturnLocation(type));
5631       break;
5632     }
5633 
5634     default:
5635       LOG(FATAL) << "Unexpected rem type " << type;
5636   }
5637 }
5638 
5639 void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
5640   Primitive::Type type = instruction->GetType();
5641 
5642   switch (type) {
5643     case Primitive::kPrimInt:
5644     case Primitive::kPrimLong:
5645       GenerateDivRemIntegral(instruction);
5646       break;
5647 
5648     case Primitive::kPrimFloat:
5649     case Primitive::kPrimDouble: {
5650       QuickEntrypointEnum entrypoint = (type == Primitive::kPrimFloat) ? kQuickFmodf : kQuickFmod;
5651       codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
5652       if (type == Primitive::kPrimFloat) {
5653         CheckEntrypointTypes<kQuickFmodf, float, float, float>();
5654       } else {
5655         CheckEntrypointTypes<kQuickFmod, double, double, double>();
5656       }
5657       break;
5658     }
5659     default:
5660       LOG(FATAL) << "Unexpected rem type " << type;
5661   }
5662 }
5663 
5664 void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
5665   memory_barrier->SetLocations(nullptr);
5666 }
5667 
5668 void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
5669   GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
5670 }
5671 
5672 void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
5673   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
5674   Primitive::Type return_type = ret->InputAt(0)->GetType();
5675   locations->SetInAt(0, Mips64ReturnLocation(return_type));
5676 }
5677 
5678 void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
5679   codegen_->GenerateFrameExit();
5680 }
5681 
5682 void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
5683   ret->SetLocations(nullptr);
5684 }
5685 
5686 void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
5687   codegen_->GenerateFrameExit();
5688 }
5689 
5690 void LocationsBuilderMIPS64::VisitRor(HRor* ror) {
5691   HandleShift(ror);
5692 }
5693 
5694 void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror) {
5695   HandleShift(ror);
5696 }
5697 
5698 void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
5699   HandleShift(shl);
5700 }
5701 
5702 void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
5703   HandleShift(shl);
5704 }
5705 
5706 void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
5707   HandleShift(shr);
5708 }
5709 
5710 void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
5711   HandleShift(shr);
5712 }
5713 
5714 void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
5715   HandleBinaryOp(instruction);
5716 }
5717 
5718 void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
5719   HandleBinaryOp(instruction);
5720 }
5721 
5722 void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
5723   HandleFieldGet(instruction, instruction->GetFieldInfo());
5724 }
5725 
5726 void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
5727   HandleFieldGet(instruction, instruction->GetFieldInfo());
5728 }
5729 
5730 void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
5731   HandleFieldSet(instruction, instruction->GetFieldInfo());
5732 }
5733 
5734 void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
5735   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
5736 }
5737 
5738 void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
5739     HUnresolvedInstanceFieldGet* instruction) {
5740   FieldAccessCallingConventionMIPS64 calling_convention;
5741   codegen_->CreateUnresolvedFieldLocationSummary(
5742       instruction, instruction->GetFieldType(), calling_convention);
5743 }
5744 
5745 void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
5746     HUnresolvedInstanceFieldGet* instruction) {
5747   FieldAccessCallingConventionMIPS64 calling_convention;
5748   codegen_->GenerateUnresolvedFieldAccess(instruction,
5749                                           instruction->GetFieldType(),
5750                                           instruction->GetFieldIndex(),
5751                                           instruction->GetDexPc(),
5752                                           calling_convention);
5753 }
5754 
5755 void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
5756     HUnresolvedInstanceFieldSet* instruction) {
5757   FieldAccessCallingConventionMIPS64 calling_convention;
5758   codegen_->CreateUnresolvedFieldLocationSummary(
5759       instruction, instruction->GetFieldType(), calling_convention);
5760 }
5761 
5762 void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
5763     HUnresolvedInstanceFieldSet* instruction) {
5764   FieldAccessCallingConventionMIPS64 calling_convention;
5765   codegen_->GenerateUnresolvedFieldAccess(instruction,
5766                                           instruction->GetFieldType(),
5767                                           instruction->GetFieldIndex(),
5768                                           instruction->GetDexPc(),
5769                                           calling_convention);
5770 }
5771 
5772 void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
5773     HUnresolvedStaticFieldGet* instruction) {
5774   FieldAccessCallingConventionMIPS64 calling_convention;
5775   codegen_->CreateUnresolvedFieldLocationSummary(
5776       instruction, instruction->GetFieldType(), calling_convention);
5777 }
5778 
5779 void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
5780     HUnresolvedStaticFieldGet* instruction) {
5781   FieldAccessCallingConventionMIPS64 calling_convention;
5782   codegen_->GenerateUnresolvedFieldAccess(instruction,
5783                                           instruction->GetFieldType(),
5784                                           instruction->GetFieldIndex(),
5785                                           instruction->GetDexPc(),
5786                                           calling_convention);
5787 }
5788 
5789 void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
5790     HUnresolvedStaticFieldSet* instruction) {
5791   FieldAccessCallingConventionMIPS64 calling_convention;
5792   codegen_->CreateUnresolvedFieldLocationSummary(
5793       instruction, instruction->GetFieldType(), calling_convention);
5794 }
5795 
5796 void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
5797     HUnresolvedStaticFieldSet* instruction) {
5798   FieldAccessCallingConventionMIPS64 calling_convention;
5799   codegen_->GenerateUnresolvedFieldAccess(instruction,
5800                                           instruction->GetFieldType(),
5801                                           instruction->GetFieldIndex(),
5802                                           instruction->GetDexPc(),
5803                                           calling_convention);
5804 }
5805 
5806 void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
5807   LocationSummary* locations =
5808       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
5809   locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
5810 }
5811 
5812 void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
5813   HBasicBlock* block = instruction->GetBlock();
5814   if (block->GetLoopInformation() != nullptr) {
5815     DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
5816     // The back edge will generate the suspend check.
5817     return;
5818   }
5819   if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
5820     // The goto will generate the suspend check.
5821     return;
5822   }
5823   GenerateSuspendCheck(instruction, nullptr);
5824 }
5825 
5826 void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
5827   LocationSummary* locations =
5828       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
5829   InvokeRuntimeCallingConvention calling_convention;
5830   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
5831 }
5832 
5833 void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
5834   codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
5835   CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
5836 }
5837 
5838 void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
5839   Primitive::Type input_type = conversion->GetInputType();
5840   Primitive::Type result_type = conversion->GetResultType();
5841   DCHECK_NE(input_type, result_type);
5842 
5843   if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
5844       (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
5845     LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
5846   }
5847 
5848   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion);
5849 
5850   if (Primitive::IsFloatingPointType(input_type)) {
5851     locations->SetInAt(0, Location::RequiresFpuRegister());
5852   } else {
5853     locations->SetInAt(0, Location::RequiresRegister());
5854   }
5855 
5856   if (Primitive::IsFloatingPointType(result_type)) {
5857     locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
5858   } else {
5859     locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
5860   }
5861 }
5862 
5863 void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
5864   LocationSummary* locations = conversion->GetLocations();
5865   Primitive::Type result_type = conversion->GetResultType();
5866   Primitive::Type input_type = conversion->GetInputType();
5867 
5868   DCHECK_NE(input_type, result_type);
5869 
5870   if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
5871     GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
5872     GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
5873 
5874     switch (result_type) {
5875       case Primitive::kPrimChar:
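        // char is an unsigned 16-bit type, so zero-extend the low 16 bits of the input.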
5876         __ Andi(dst, src, 0xFFFF);
5877         break;
5878       case Primitive::kPrimByte:
5879         if (input_type == Primitive::kPrimLong) {
5880           // Type conversion from long to types narrower than int is a result of code
5881           // transformations. To avoid unpredictable results for SEB and SEH, we first
5882           // need to sign-extend the low 32-bit value into bits 32 through 63.
5883           __ Sll(dst, src, 0);
5884           __ Seb(dst, dst);
5885         } else {
5886           __ Seb(dst, src);
5887         }
5888         break;
5889       case Primitive::kPrimShort:
5890         if (input_type == Primitive::kPrimLong) {
5891           // Type conversion from long to types narrower than int is a result of code
5892           // transformations. To avoid unpredictable results for SEB and SEH, we first
5893           // need to sign-extend the low 32-bit value into bits 32 through 63.
5894           __ Sll(dst, src, 0);
5895           __ Seh(dst, dst);
5896         } else {
5897           __ Seh(dst, src);
5898         }
5899         break;
5900       case Primitive::kPrimInt:
5901       case Primitive::kPrimLong:
5902         // Sign-extend 32-bit int into bits 32 through 63 for int-to-long and long-to-int
5903         // conversions, except when the input and output registers are the same and we are not
5904         // converting longs to shorter types. In these cases, do nothing.
5905         if ((input_type == Primitive::kPrimLong) || (dst != src)) {
5906           __ Sll(dst, src, 0);
5907         }
5908         break;
5909 
5910       default:
5911         LOG(FATAL) << "Unexpected type conversion from " << input_type
5912                    << " to " << result_type;
5913     }
5914   } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
5915     FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
5916     GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
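    // Integer-to-FP conversion: move the integer bits into an FPU register, then convert
    // from word (32-bit) or long (64-bit) to the requested float/double format.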
5917     if (input_type == Primitive::kPrimLong) {
5918       __ Dmtc1(src, FTMP);
5919       if (result_type == Primitive::kPrimFloat) {
5920         __ Cvtsl(dst, FTMP);
5921       } else {
5922         __ Cvtdl(dst, FTMP);
5923       }
5924     } else {
5925       __ Mtc1(src, FTMP);
5926       if (result_type == Primitive::kPrimFloat) {
5927         __ Cvtsw(dst, FTMP);
5928       } else {
5929         __ Cvtdw(dst, FTMP);
5930       }
5931     }
5932   } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
5933     CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
5934     GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
5935     FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
5936     Mips64Label truncate;
5937     Mips64Label done;
5938 
5939     // When NAN2008=0 (R2 and before), the truncate instruction produces the maximum positive
5940     // value when the input is either a NaN or is outside of the range of the output type
5941     // after the truncation. IOW, the three special cases (NaN, too small, too big) produce
5942     // the same result.
5943     //
5944     // When NAN2008=1 (R6), the truncate instruction caps the output at the minimum/maximum
5945     // value of the output type if the input is outside of the range after the truncation or
5946     // produces 0 when the input is a NaN. IOW, the three special cases produce three distinct
5947     // results. This matches the desired float/double-to-int/long conversion exactly.
5948     //
5949     // So, NAN2008 affects handling of negative values and NaNs by the truncate instruction.
5950     //
5951     // The following code supports both NAN2008=0 and NAN2008=1 behaviors of the truncate
5952     // instruction, the reason being that the emulator implements NAN2008=0 on MIPS64R6,
5953     // even though it must be NAN2008=1 on R6.
5954     //
5955     // The code takes care of the different behaviors by first comparing the input to the
5956     // minimum output value (-2**63 for truncating to long, -2**31 for truncating to int).
5957     // If the input is greater than or equal to the minimum, it proceeds to the truncate
5958     // instruction, which will handle such an input the same way irrespective of NAN2008.
5959     // Otherwise the input is compared to itself to determine whether it is a NaN or not
5960     // in order to return either zero or the minimum value.
5961     //
5962     // TODO: simplify this when the emulator correctly implements NAN2008=1 behavior of the
5963     // truncate instruction for MIPS64R6.
    if (input_type == Primitive::kPrimFloat) {
      uint32_t min_val = (result_type == Primitive::kPrimLong)
          ? bit_cast<uint32_t, float>(std::numeric_limits<int64_t>::min())
          : bit_cast<uint32_t, float>(std::numeric_limits<int32_t>::min());
      __ LoadConst32(TMP, min_val);
      __ Mtc1(TMP, FTMP);
      __ CmpLeS(FTMP, FTMP, src);
    } else {
      uint64_t min_val = (result_type == Primitive::kPrimLong)
          ? bit_cast<uint64_t, double>(std::numeric_limits<int64_t>::min())
          : bit_cast<uint64_t, double>(std::numeric_limits<int32_t>::min());
      __ LoadConst64(TMP, min_val);
      __ Dmtc1(TMP, FTMP);
      __ CmpLeD(FTMP, FTMP, src);
    }

    __ Bc1nez(FTMP, &truncate);

    if (input_type == Primitive::kPrimFloat) {
      __ CmpEqS(FTMP, src, src);
    } else {
      __ CmpEqD(FTMP, src, src);
    }
    if (result_type == Primitive::kPrimLong) {
      __ LoadConst64(dst, std::numeric_limits<int64_t>::min());
    } else {
      __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
    }
    __ Mfc1(TMP, FTMP);
    __ And(dst, dst, TMP);

    __ Bc(&done);

    __ Bind(&truncate);

    if (result_type == Primitive::kPrimLong) {
      if (input_type == Primitive::kPrimFloat) {
        __ TruncLS(FTMP, src);
      } else {
        __ TruncLD(FTMP, src);
      }
      __ Dmfc1(dst, FTMP);
    } else {
      if (input_type == Primitive::kPrimFloat) {
        __ TruncWS(FTMP, src);
      } else {
        __ TruncWD(FTMP, src);
      }
      __ Mfc1(dst, FTMP);
    }

    __ Bind(&done);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitBelow(HBelow* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitBelow(HBelow* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitAbove(HAbove* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitAbove(HAbove* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
  HandleCondition(comp);
}

// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::GenPackedSwitchWithCompares(GpuRegister value_reg,
                                                                 int32_t lower_bound,
                                                                 uint32_t num_entries,
                                                                 HBasicBlock* switch_block,
                                                                 HBasicBlock* default_block) {
  // Create a set of compare/jumps.
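  // For illustration, with lower_bound == 10 and num_entries == 4 this emits roughly
  // (labels are illustrative):
  //   addiu TMP, value, -10
  //   bltzc TMP, default        // value < 10
  //   beqzc TMP, case_10        // value == 10
  //   addiu TMP, TMP, -2
  //   bltzc TMP, case_11        // value == 11
  //   beqzc TMP, case_12        // value == 12
  //   addiu TMP, TMP, -1
  //   beqzc TMP, case_13        // value == 13
  //   bc    default             // omitted if the default block is the fall-through block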
  GpuRegister temp_reg = TMP;
  __ Addiu32(temp_reg, value_reg, -lower_bound);
  // Jump to default if the index is negative.
  // Note: We don't check the case where the index is positive while value < lower_bound,
  // because in that case index >= num_entries must be true, which lets us save one branch
  // instruction.
  __ Bltzc(temp_reg, codegen_->GetLabelOf(default_block));

  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
  // Jump to successors[0] if value == lower_bound.
  __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[0]));
  int32_t last_index = 0;
  for (; num_entries - last_index > 2; last_index += 2) {
    __ Addiu(temp_reg, temp_reg, -2);
    // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
    __ Bltzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
    // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
    __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
  }
  if (num_entries - last_index == 2) {
    // Handle the last remaining case value.
    __ Addiu(temp_reg, temp_reg, -1);
    __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
  }

  // And the default for any other value.
  if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
    __ Bc(codegen_->GetLabelOf(default_block));
  }
}

void InstructionCodeGeneratorMIPS64::GenTableBasedPackedSwitch(GpuRegister value_reg,
                                                               int32_t lower_bound,
                                                               uint32_t num_entries,
                                                               HBasicBlock* switch_block,
                                                               HBasicBlock* default_block) {
  // Create a jump table.
  std::vector<Mips64Label*> labels(num_entries);
  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
  for (uint32_t i = 0; i < num_entries; i++) {
    labels[i] = codegen_->GetLabelOf(successors[i]);
  }
  JumpTable* table = __ CreateJumpTable(std::move(labels));

  // Is the value in range?
  __ Addiu32(TMP, value_reg, -lower_bound);
  __ LoadConst32(AT, num_entries);
  __ Bgeuc(TMP, AT, codegen_->GetLabelOf(default_block));

  // We are in the range of the table.
  // Load the target address from the jump table, indexing by the value.
  __ LoadLabelAddress(AT, table->GetLabel());
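  // Each jump table entry is a 4-byte offset relative to the table start, so scale the
  // index by 4 (shift left by 2) when forming the entry address.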
  __ Dlsa(TMP, TMP, AT, 2);
  __ Lw(TMP, TMP, 0);
  // Compute the absolute target address by adding the table start address
  // (the table contains offsets to targets relative to its start).
  __ Daddu(TMP, TMP, AT);
  // And jump.
  __ Jr(TMP);
  __ Nop();
}

void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  uint32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
  HBasicBlock* switch_block = switch_instr->GetBlock();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  if (num_entries > kPackedSwitchJumpTableThreshold) {
    GenTableBasedPackedSwitch(value_reg,
                              lower_bound,
                              num_entries,
                              switch_block,
                              default_block);
  } else {
    GenPackedSwitchWithCompares(value_reg,
                                lower_bound,
                                num_entries,
                                switch_block,
                                default_block);
  }
}

void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitClassTableGet(HClassTableGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
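    // Vtable case: load the method entry at the given index directly from the class's
    // embedded vtable.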
    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
        instruction->GetIndex(), kMips64PointerSize).SizeValue();
    __ LoadFromOffset(kLoadDoubleword,
                      locations->Out().AsRegister<GpuRegister>(),
                      locations->InAt(0).AsRegister<GpuRegister>(),
                      method_offset);
  } else {
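    // IMT case: load the IMT pointer out of the class first, then load the method entry
    // at the given index from the IMT.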
    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
        instruction->GetIndex(), kMips64PointerSize));
    __ LoadFromOffset(kLoadDoubleword,
                      locations->Out().AsRegister<GpuRegister>(),
                      locations->InAt(0).AsRegister<GpuRegister>(),
                      mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
    __ LoadFromOffset(kLoadDoubleword,
                      locations->Out().AsRegister<GpuRegister>(),
                      locations->Out().AsRegister<GpuRegister>(),
                      method_offset);
  }
}

}  // namespace mips64
}  // namespace art