/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips.h"

#include "arch/mips/entrypoints_direct_mips.h"
#include "arch/mips/instruction_set_features_mips.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_mips.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/mips/assembler_mips.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = A0;

// We'll maximize the range of a single load instruction for dex cache array accesses
// by aligning offset -32768 with the offset of the first used element.
static constexpr uint32_t kDexCacheArrayLwOffset = 0x8000;
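// Illustration: with a base pointing kDexCacheArrayLwOffset bytes past the first used
// element, lw offsets in [-0x8000, 0x7FFF] cover a contiguous 64KB window starting at
// that element.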
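// Returns the location in which a value of the given type is returned:
// V0 (the V0/V1 pair for longs) for core values, F0 for floating-point values.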
Location MipsReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimLong:
      return Location::RegisterPairLocation(V0, V1);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(Primitive::Type type) const {
  return MipsReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

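// Computes the location of the next method call argument: core arguments go into the
// next free GP argument registers (longs into aligned even/odd pairs), floating-point
// arguments into FP argument registers, and arguments that do not fit go onto the stack.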
Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(Primitive::Type type) {
  Location next_location;

  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t gp_index = gp_index_++;
      if (gp_index < calling_convention.GetNumberOfRegisters()) {
        next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index));
      } else {
        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
        next_location = Location::StackSlot(stack_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t gp_index = gp_index_;
      gp_index_ += 2;
      if (gp_index + 1 < calling_convention.GetNumberOfRegisters()) {
        Register reg = calling_convention.GetRegisterAt(gp_index);
        if (reg == A1 || reg == A3) {
          gp_index_++;  // Skip A1(A3), and use A2_A3(T0_T1) instead.
          gp_index++;
        }
        Register low_even = calling_convention.GetRegisterAt(gp_index);
        Register high_odd = calling_convention.GetRegisterAt(gp_index + 1);
        DCHECK_EQ(low_even + 1, high_odd);
        next_location = Location::RegisterPairLocation(low_even, high_odd);
      } else {
        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
        next_location = Location::DoubleStackSlot(stack_offset);
      }
      break;
    }

    // Note: both float and double types are stored in even FPU registers. On 32 bit FPU, double
    // will take up the even/odd pair, while floats are stored in even regs only.
    // On 64 bit FPU, both double and float are stored in even registers only.
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      uint32_t float_index = float_index_++;
      if (float_index < calling_convention.GetNumberOfFpuRegisters()) {
        next_location = Location::FpuRegisterLocation(
            calling_convention.GetFpuRegisterAt(float_index));
      } else {
        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
        next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                     : Location::StackSlot(stack_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  return next_location;
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return MipsReturnLocation(type);
}

// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->  // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()

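// Slow path calling the runtime to throw the exception for an out-of-range index
// (array bounds, or string bounds for String.charAt).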
class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
        ? kQuickThrowStringBounds
        : kQuickThrowArrayBounds;
    mips_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
};

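// Slow path calling the runtime to throw ArithmeticException on division by zero.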
class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
};

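// Slow path calling the runtime to resolve a class or initialize its static storage
// and, for HLoadClass/kBssEntry, store the resolved class to the .bss entry.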
class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  LoadClassSlowPathMIPS(HLoadClass* cls,
                        HInstruction* at,
                        uint32_t dex_pc,
                        bool do_clinit)
      : SlowPathCodeMIPS(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    dex::TypeIndex type_index = cls_->GetTypeIndex();
    __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);

    QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
                                                : kQuickInitializeType;
    mips_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = instruction_->GetType();
      mips_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
    if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
      DCHECK(out.IsValid());
      // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
      // kSaveEverything and use a temporary for the .bss entry address in the fast path,
      // so that we can avoid another calculation here.
      bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
      Register base = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
      DCHECK_NE(out.AsRegister<Register>(), AT);
      CodeGeneratorMIPS::PcRelativePatchInfo* info =
          mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
      bool reordering = __ SetReorder(false);
      mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info, TMP, base);
      __ StoreToOffset(kStoreWord, out.AsRegister<Register>(), TMP, /* placeholder */ 0x5678);
      __ SetReorder(reordering);
    }
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
};

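// Slow path calling the runtime to resolve a String and store it to the .bss entry.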
class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit LoadStringSlowPathMIPS(HLoadString* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    HLoadString* load = instruction_->AsLoadString();
    const dex::StringIndex string_index = load->GetStringIndex();
    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
    mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    mips_codegen->MoveLocation(locations->Out(),
                               calling_convention.GetReturnLocation(type),
                               type);

    RestoreLiveRegisters(codegen, locations);

    // Store the resolved String to the BSS entry.
    // TODO: Change art_quick_resolve_string to kSaveEverything and use a temporary for the
    // .bss entry address in the fast path, so that we can avoid another calculation here.
    bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
    Register base = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
    Register out = locations->Out().AsRegister<Register>();
    DCHECK_NE(out, AT);
    CodeGeneratorMIPS::PcRelativePatchInfo* info =
        mips_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
    bool reordering = __ SetReorder(false);
    mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info, TMP, base);
    __ StoreToOffset(kStoreWord, out, TMP, /* placeholder */ 0x5678);
    __ SetReorder(reordering);

    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
};

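// Slow path calling the runtime to throw NullPointerException.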
class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips_codegen->InvokeRuntime(kQuickThrowNullPointer,
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
};

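// Slow path calling the runtime suspend-check entrypoint, then branching back either to
// the return label or to the given successor block.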
class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
      : SlowPathCodeMIPS(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    mips_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips_codegen->GetLabelOf(successor_));
    }
  }

  MipsLabel* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }

 private:
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  MipsLabel return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS);
};

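// Slow path for instance-of and check-cast: calls the runtime to perform the non-trivial
// type check, or to throw for a failed check-cast.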
class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal)
      : SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);

    __ Bind(GetEntryLabel());
    if (!is_fatal_) {
      SaveLiveRegisters(codegen, locations);
    }

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);
    if (instruction_->IsInstanceOf()) {
      mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
    }

    if (!is_fatal_) {
      RestoreLiveRegisters(codegen, locations);
      __ B(GetExitLabel());
    }
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }

  bool IsFatal() const OVERRIDE { return is_fatal_; }

 private:
  const bool is_fatal_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
};

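// Slow path calling the runtime to deoptimize the method into the interpreter.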
class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
      : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    LocationSummary* locations = instruction_->GetLocations();
    SaveLiveRegisters(codegen, locations);
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0),
                   static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
    mips_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
};

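// Slow path calling the runtime aput-object entrypoint for reference array stores that
// need a full type check.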
class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
    parallel_move.AddMove(
        locations->InAt(0),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(2),
        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
        Primitive::kPrimNot,
        nullptr);
    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);

    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    mips_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS);
};

// Slow path marking an object reference `ref` during a read
// barrier. The field `obj.field` in the object `obj` holding this
// reference does not get updated by this slow path after marking (see
// ReadBarrierMarkAndUpdateFieldSlowPathMIPS below for that).
//
// This means that after the execution of this slow path, `ref` will
// always be up-to-date, but `obj.field` may not; i.e., after the
// flip, `ref` will be a to-space reference, but `obj.field` will
// probably still be a from-space reference (unless it gets updated by
// another thread, or if another thread installed another object
// reference (different from `ref`) in `obj.field`).
//
// If `entrypoint` is a valid location it is assumed to already be
// holding the entrypoint. The case where the entrypoint is passed in
// is for the GcRoot read barrier.
class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierMarkSlowPathMIPS(HInstruction* instruction,
                              Location ref,
                              Location entrypoint = Location::NoLocation())
      : SlowPathCodeMIPS(instruction), ref_(ref), entrypoint_(entrypoint) {
    DCHECK(kEmitCompilerReadBarrier);
  }

  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Register ref_reg = ref_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
    DCHECK(instruction_->IsInstanceFieldGet() ||
           instruction_->IsStaticFieldGet() ||
           instruction_->IsArrayGet() ||
           instruction_->IsArraySet() ||
           instruction_->IsLoadClass() ||
           instruction_->IsLoadString() ||
           instruction_->IsInstanceOf() ||
           instruction_->IsCheckCast() ||
           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
           (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
        << "Unexpected instruction in read barrier marking slow path: "
        << instruction_->DebugName();

    __ Bind(GetEntryLabel());
    // No need to save live registers; it's taken care of by the
    // entrypoint. Also, there is no need to update the stack mask,
    // as this runtime call will not trigger a garbage collection.
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
           (S2 <= ref_reg && ref_reg <= S7) ||
           (ref_reg == FP)) << ref_reg;
    // "Compact" slow path, saving two moves.
    //
    // Instead of using the standard runtime calling convention (input
    // and output in A0 and V0 respectively):
    //
    //   A0 <- ref
    //   V0 <- ReadBarrierMark(A0)
    //   ref <- V0
    //
    // we just use rX (the register containing `ref`) as input and output
    // of a dedicated entrypoint:
    //
    //   rX <- ReadBarrierMarkRegX(rX)
    //
    if (entrypoint_.IsValid()) {
      mips_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
      DCHECK_EQ(entrypoint_.AsRegister<Register>(), T9);
      __ Jalr(entrypoint_.AsRegister<Register>());
      __ NopIfNoReordering();
    } else {
      int32_t entry_point_offset =
          CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
      // This runtime call does not require a stack map.
      mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                        instruction_,
                                                        this,
                                                        /* direct */ false);
    }
    __ B(GetExitLabel());
  }

 private:
  // The location (register) of the marked object reference.
  const Location ref_;

  // The location of the entrypoint if already loaded.
  const Location entrypoint_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathMIPS);
};

// Slow path marking an object reference `ref` during a read barrier,
// and if needed, atomically updating the field `obj.field` in the
// object `obj` holding this reference after marking (contrary to
// ReadBarrierMarkSlowPathMIPS above, which never tries to update
// `obj.field`).
//
// This means that after the execution of this slow path, both `ref`
// and `obj.field` will be up-to-date; i.e., after the flip, both will
// hold the same to-space reference (unless another thread installed
// another object reference (different from `ref`) in `obj.field`).
class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierMarkAndUpdateFieldSlowPathMIPS(HInstruction* instruction,
                                            Location ref,
                                            Register obj,
                                            Location field_offset,
                                            Register temp1)
      : SlowPathCodeMIPS(instruction),
        ref_(ref),
        obj_(obj),
        field_offset_(field_offset),
        temp1_(temp1) {
    DCHECK(kEmitCompilerReadBarrier);
  }

  const char* GetDescription() const OVERRIDE {
    return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS";
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Register ref_reg = ref_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
    // This slow path is only used by the UnsafeCASObject intrinsic.
    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
        << "Unexpected instruction in read barrier marking and field updating slow path: "
        << instruction_->DebugName();
    DCHECK(instruction_->GetLocations()->Intrinsified());
    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
    DCHECK(field_offset_.IsRegisterPair()) << field_offset_;

    __ Bind(GetEntryLabel());

    // Save the old reference.
    // Note that we cannot use AT or TMP to save the old reference, as those
    // are used by the code that follows, but we need the old reference after
    // the call to the ReadBarrierMarkRegX entry point.
    DCHECK_NE(temp1_, AT);
    DCHECK_NE(temp1_, TMP);
    __ Move(temp1_, ref_reg);

    // No need to save live registers; it's taken care of by the
    // entrypoint. Also, there is no need to update the stack mask,
    // as this runtime call will not trigger a garbage collection.
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
           (S2 <= ref_reg && ref_reg <= S7) ||
           (ref_reg == FP)) << ref_reg;
    // "Compact" slow path, saving two moves.
    //
    // Instead of using the standard runtime calling convention (input
    // and output in A0 and V0 respectively):
    //
    //   A0 <- ref
    //   V0 <- ReadBarrierMark(A0)
    //   ref <- V0
    //
    // we just use rX (the register containing `ref`) as input and output
    // of a dedicated entrypoint:
    //
    //   rX <- ReadBarrierMarkRegX(rX)
    //
    int32_t entry_point_offset =
        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
    // This runtime call does not require a stack map.
    mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                      instruction_,
                                                      this,
                                                      /* direct */ false);

    // If the new reference is different from the old reference,
    // update the field in the holder (`*(obj_ + field_offset_)`).
    //
    // Note that this field could also hold a different object, if
    // another thread had concurrently changed it. In that case, the
    // compare-and-set (CAS) loop below would abort, leaving the
    // field as-is.
    MipsLabel done;
    __ Beq(temp1_, ref_reg, &done);

    // Update the holder's field atomically. This may fail if the
    // mutator updates it before us, but that is OK. This is achieved
    // using a strong compare-and-set (CAS) operation with relaxed
    // memory synchronization ordering, where the expected value is
    // the old reference and the desired value is the new reference.

    // Convenience aliases.
    Register base = obj_;
    // The UnsafeCASObject intrinsic uses a register pair as field
    // offset ("long offset"), of which only the low part contains
    // data.
    Register offset = field_offset_.AsRegisterPairLow<Register>();
    Register expected = temp1_;
    Register value = ref_reg;
    Register tmp_ptr = TMP;  // Pointer to actual memory.
    Register tmp = AT;       // Value in memory.

    __ Addu(tmp_ptr, base, offset);

    if (kPoisonHeapReferences) {
      __ PoisonHeapReference(expected);
      // Do not poison `value` if it is the same register as
      // `expected`, which has just been poisoned.
      if (value != expected) {
        __ PoisonHeapReference(value);
      }
    }

    // do {
    //   tmp = [r_ptr] - expected;
    // } while (tmp == 0 && failure([r_ptr] <- r_new_value));

    bool is_r6 = mips_codegen->GetInstructionSetFeatures().IsR6();
    MipsLabel loop_head, exit_loop;
    __ Bind(&loop_head);
    if (is_r6) {
      __ LlR6(tmp, tmp_ptr);
    } else {
      __ LlR2(tmp, tmp_ptr);
    }
    __ Bne(tmp, expected, &exit_loop);
    __ Move(tmp, value);
    if (is_r6) {
      __ ScR6(tmp, tmp_ptr);
    } else {
      __ ScR2(tmp, tmp_ptr);
    }
    __ Beqz(tmp, &loop_head);
    __ Bind(&exit_loop);

    if (kPoisonHeapReferences) {
      __ UnpoisonHeapReference(expected);
      // Do not unpoison `value` if it is the same register as
      // `expected`, which has just been unpoisoned.
      if (value != expected) {
        __ UnpoisonHeapReference(value);
      }
    }

    __ Bind(&done);
    __ B(GetExitLabel());
  }

 private:
  // The location (register) of the marked object reference.
  const Location ref_;
  // The register containing the object holding the marked object reference field.
  const Register obj_;
  // The location of the offset of the marked reference field within `obj_`.
  Location field_offset_;

  const Register temp1_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathMIPS);
};

// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierForHeapReferenceSlowPathMIPS(HInstruction* instruction,
                                          Location out,
                                          Location ref,
                                          Location obj,
                                          uint32_t offset,
                                          Location index)
      : SlowPathCodeMIPS(instruction),
        out_(out),
        ref_(ref),
        obj_(obj),
        offset_(offset),
        index_(index) {
    DCHECK(kEmitCompilerReadBarrier);
    // If `obj` is equal to `out` or `ref`, it means the initial object
    // has been overwritten by (or after) the heap object reference load
    // to be instrumented, e.g.:
    //
    //   __ LoadFromOffset(kLoadWord, out, out, offset);
    //   codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
    //
    // In that case, we have lost the information about the original
    // object, and the emitted read barrier cannot work properly.
    DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
    DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    LocationSummary* locations = instruction_->GetLocations();
    Register reg_out = out_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
    DCHECK(instruction_->IsInstanceFieldGet() ||
           instruction_->IsStaticFieldGet() ||
           instruction_->IsArrayGet() ||
           instruction_->IsInstanceOf() ||
           instruction_->IsCheckCast() ||
           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
        << "Unexpected instruction in read barrier for heap reference slow path: "
        << instruction_->DebugName();

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We may have to change the index's value, but as `index_` is a
    // constant member (like other "inputs" of this slow path),
    // introduce a copy of it, `index`.
    Location index = index_;
    if (index_.IsValid()) {
      // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
      if (instruction_->IsArrayGet()) {
        // Compute the actual memory offset and store it in `index`.
        Register index_reg = index_.AsRegister<Register>();
        DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
        if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
          // We are about to change the value of `index_reg` (see the
          // calls to art::mips::MipsAssembler::Sll and
          // art::mips::MipsAssembler::Addiu32 below), but it has
          // not been saved by the previous call to
          // art::SlowPathCode::SaveLiveRegisters, as it is a
          // callee-save register --
          // art::SlowPathCode::SaveLiveRegisters does not consider
          // callee-save registers, as it has been designed with the
          // assumption that callee-save registers are supposed to be
          // handled by the called function. So, as a callee-save
          // register, `index_reg` _would_ eventually be saved onto
          // the stack, but it would be too late: we would have
          // changed its value earlier. Therefore, we manually save
          // it here into another freely available register,
          // `free_reg`, chosen of course among the caller-save
          // registers (as a callee-save `free_reg` register would
          // exhibit the same problem).
          //
          // Note we could have requested a temporary register from
          // the register allocator instead; but we prefer not to, as
          // this is a slow path, and we know we can find a
          // caller-save register that is available.
          Register free_reg = FindAvailableCallerSaveRegister(codegen);
          __ Move(free_reg, index_reg);
          index_reg = free_reg;
          index = Location::RegisterLocation(index_reg);
        } else {
          // The initial register stored in `index_` has already been
          // saved in the call to art::SlowPathCode::SaveLiveRegisters
          // (as it is not a callee-save register), so we can freely
          // use it.
        }
        // Shifting the index value contained in `index_reg` by the scale
        // factor (2) cannot overflow in practice, as the runtime is
        // unable to allocate object arrays with a size larger than
        // 2^26 - 1 (that is, 2^28 - 4 bytes).
        __ Sll(index_reg, index_reg, TIMES_4);
        static_assert(
            sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
            "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
        __ Addiu32(index_reg, index_reg, offset_);
      } else {
        // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile
        // intrinsics, `index_` is not shifted by a scale factor of 2
        // (as in the case of ArrayGet), as it is actually an offset
        // to an object field within an object.
        DCHECK(instruction_->IsInvoke()) << instruction_->DebugName();
        DCHECK(instruction_->GetLocations()->Intrinsified());
        DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
               (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
            << instruction_->AsInvoke()->GetIntrinsic();
        DCHECK_EQ(offset_, 0U);
        DCHECK(index_.IsRegisterPair());
        // UnsafeGet's offset location is a register pair, the low
        // part contains the correct offset.
        index = index_.ToLow();
      }
    }

    // We're moving two or three locations to locations that could
    // overlap, so we need a parallel move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
    parallel_move.AddMove(ref_,
                          Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                          Primitive::kPrimNot,
                          nullptr);
    parallel_move.AddMove(obj_,
                          Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                          Primitive::kPrimNot,
                          nullptr);
    if (index.IsValid()) {
      parallel_move.AddMove(index,
                            Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
                            Primitive::kPrimInt,
                            nullptr);
      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
    } else {
      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
      __ LoadConst32(calling_convention.GetRegisterAt(2), offset_);
    }
    mips_codegen->InvokeRuntime(kQuickReadBarrierSlow,
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
    CheckEntrypointTypes<
        kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
    mips_codegen->Move32(out_, calling_convention.GetReturnLocation(Primitive::kPrimNot));

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }

 private:
  Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
    size_t ref = static_cast<int>(ref_.AsRegister<Register>());
    size_t obj = static_cast<int>(obj_.AsRegister<Register>());
    for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
      if (i != ref &&
          i != obj &&
          !codegen->IsCoreCalleeSaveRegister(i) &&
          !codegen->IsBlockedCoreRegister(i)) {
        return static_cast<Register>(i);
      }
    }
    // We shall never fail to find a free caller-save register, as
    // there are more than two core caller-save registers on MIPS
    // (meaning it is possible to find one which is different from
    // `ref` and `obj`).
    DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
    LOG(FATAL) << "Could not find a free caller-save register";
    UNREACHABLE();
  }

  const Location out_;
  const Location ref_;
  const Location obj_;
  const uint32_t offset_;
  // An additional location containing an index to an array.
  // Only used for HArrayGet and the UnsafeGetObject &
  // UnsafeGetObjectVolatile intrinsics.
  const Location index_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathMIPS);
};

// Slow path generating a read barrier for a GC root.
class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierForRootSlowPathMIPS(HInstruction* instruction, Location out, Location root)
      : SlowPathCodeMIPS(instruction), out_(out), root_(root) {
    DCHECK(kEmitCompilerReadBarrier);
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Register reg_out = out_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
    DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
        << "Unexpected instruction in read barrier for GC root slow path: "
        << instruction_->DebugName();

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    mips_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_);
    mips_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
    CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
    mips_codegen->Move32(out_, calling_convention.GetReturnLocation(Primitive::kPrimNot));

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS"; }

 private:
  const Location out_;
  const Location root_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathMIPS);
};

CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
                                     const MipsInstructionSetFeatures& isa_features,
                                     const CompilerOptions& compiler_options,
                                     OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfFRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(graph->GetArena(), &isa_features),
      isa_features_(isa_features),
      uint32_literals_(std::less<uint32_t>(),
                       graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      boot_image_string_patches_(StringReferenceValueComparator(),
                                 graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      boot_image_type_patches_(TypeReferenceValueComparator(),
                               graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      clobbered_ra_(false) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<MipsAssembler*>(GetAssembler())->  // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()

void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches.
  __ FinalizeCode();

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position =
        stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    DCHECK_GE(new_position, old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }

  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }

  CodeGenerator::Finalize(allocator);
}

MipsAssembler* ParallelMoveResolverMIPS::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS::EmitMove(size_t index) {
  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  Primitive::Type type = move->GetType();
  Location loc1 = move->GetDestination();
  Location loc2 = move->GetSource();

  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  if (loc1.IsRegister() && loc2.IsRegister()) {
    // Swap 2 GPRs.
    Register r1 = loc1.AsRegister<Register>();
    Register r2 = loc2.AsRegister<Register>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (loc1.IsFpuRegister() && loc2.IsFpuRegister()) {
    FRegister f1 = loc1.AsFpuRegister<FRegister>();
    FRegister f2 = loc2.AsFpuRegister<FRegister>();
    if (type == Primitive::kPrimFloat) {
      __ MovS(FTMP, f2);
      __ MovS(f2, f1);
      __ MovS(f1, FTMP);
    } else {
      DCHECK_EQ(type, Primitive::kPrimDouble);
      __ MovD(FTMP, f2);
      __ MovD(f2, f1);
      __ MovD(f1, FTMP);
    }
  } else if ((loc1.IsRegister() && loc2.IsFpuRegister()) ||
             (loc1.IsFpuRegister() && loc2.IsRegister())) {
    // Swap FPR and GPR.
    DCHECK_EQ(type, Primitive::kPrimFloat);  // Can only swap a float.
    FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
                                        : loc2.AsFpuRegister<FRegister>();
    Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
    __ Move(TMP, r2);
    __ Mfc1(r2, f1);
    __ Mtc1(TMP, f1);
  } else if (loc1.IsRegisterPair() && loc2.IsRegisterPair()) {
    // Swap 2 GPR register pairs.
    Register r1 = loc1.AsRegisterPairLow<Register>();
    Register r2 = loc2.AsRegisterPairLow<Register>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
    r1 = loc1.AsRegisterPairHigh<Register>();
    r2 = loc2.AsRegisterPairHigh<Register>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if ((loc1.IsRegisterPair() && loc2.IsFpuRegister()) ||
             (loc1.IsFpuRegister() && loc2.IsRegisterPair())) {
    // Swap FPR and GPR register pair.
    DCHECK_EQ(type, Primitive::kPrimDouble);
    FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
                                        : loc2.AsFpuRegister<FRegister>();
    Register r2_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
                                          : loc2.AsRegisterPairLow<Register>();
    Register r2_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
                                          : loc2.AsRegisterPairHigh<Register>();
    // Use 2 temporary registers because we can't first swap the low 32 bits of an FPR and
    // then swap the high 32 bits of the same FPR. mtc1 makes the high 32 bits of an FPR
    // unpredictable and the following mfhc1 will fail.
    __ Mfc1(TMP, f1);
    __ MoveFromFpuHigh(AT, f1);
    __ Mtc1(r2_l, f1);
    __ MoveToFpuHigh(r2_h, f1);
    __ Move(r2_l, TMP);
    __ Move(r2_h, AT);
  } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
  } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
  } else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
             (loc1.IsStackSlot() && loc2.IsRegister())) {
    Register reg = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
    intptr_t offset = loc1.IsStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
    __ Move(TMP, reg);
    __ LoadFromOffset(kLoadWord, reg, SP, offset);
    __ StoreToOffset(kStoreWord, TMP, SP, offset);
  } else if ((loc1.IsRegisterPair() && loc2.IsDoubleStackSlot()) ||
             (loc1.IsDoubleStackSlot() && loc2.IsRegisterPair())) {
    Register reg_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
                                           : loc2.AsRegisterPairLow<Register>();
    Register reg_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
                                           : loc2.AsRegisterPairHigh<Register>();
    intptr_t offset_l = loc1.IsDoubleStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
    intptr_t offset_h = loc1.IsDoubleStackSlot() ? loc1.GetHighStackIndex(kMipsWordSize)
                                                 : loc2.GetHighStackIndex(kMipsWordSize);
    __ Move(TMP, reg_l);
    __ LoadFromOffset(kLoadWord, reg_l, SP, offset_l);
    __ StoreToOffset(kStoreWord, TMP, SP, offset_l);
    __ Move(TMP, reg_h);
    __ LoadFromOffset(kLoadWord, reg_h, SP, offset_h);
    __ StoreToOffset(kStoreWord, TMP, SP, offset_h);
  } else if (loc1.IsFpuRegister() || loc2.IsFpuRegister()) {
    FRegister reg = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
                                         : loc2.AsFpuRegister<FRegister>();
    intptr_t offset = loc1.IsFpuRegister() ? loc2.GetStackIndex() : loc1.GetStackIndex();
    if (type == Primitive::kPrimFloat) {
      __ MovS(FTMP, reg);
      __ LoadSFromOffset(reg, SP, offset);
      __ StoreSToOffset(FTMP, SP, offset);
    } else {
      DCHECK_EQ(type, Primitive::kPrimDouble);
      __ MovD(FTMP, reg);
      __ LoadDFromOffset(reg, SP, offset);
      __ StoreDToOffset(FTMP, SP, offset);
    }
  } else {
    LOG(FATAL) << "Swap between " << loc1 << " and " << loc2 << " is unsupported";
  }
}

void ParallelMoveResolverMIPS::RestoreScratch(int reg) {
  __ Pop(static_cast<Register>(reg));
}

void ParallelMoveResolverMIPS::SpillScratch(int reg) {
  __ Push(static_cast<Register>(reg));
}

void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot) {
  // Allocate a scratch register other than TMP, if available.
  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
  // automatically unspilled when the scratch scope object is destroyed).
  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
  int stack_offset = ensure_scratch.IsSpilled() ? kMipsWordSize : 0;
  for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
    __ LoadFromOffset(kLoadWord,
                      Register(ensure_scratch.GetRegister()),
                      SP,
                      index1 + stack_offset);
    __ LoadFromOffset(kLoadWord,
                      TMP,
                      SP,
                      index2 + stack_offset);
    __ StoreToOffset(kStoreWord,
                     Register(ensure_scratch.GetRegister()),
                     SP,
                     index2 + stack_offset);
    __ StoreToOffset(kStoreWord, TMP, SP, index1 + stack_offset);
  }
}

void CodeGeneratorMIPS::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  // If there're FPU callee-saved registers and there's an odd number of GPR callee-saved
  // registers, include the ZERO register to force alignment of FPU callee-saved registers
  // within the stack frame.
  if ((fpu_spill_mask_ != 0) && (POPCOUNT(core_spill_mask_) % 2 != 0)) {
    core_spill_mask_ |= (1 << ZERO);
  }
}

bool CodeGeneratorMIPS::HasAllocatedCalleeSaveRegisters() const {
  // If RA is clobbered by PC-relative operations on R2 and it's the only spilled register
  // (this can happen in leaf methods), force CodeGenerator::InitializeCodeGeneration()
  // into the path that creates a stack frame so that RA can be explicitly saved and restored.
  // RA can't otherwise be saved/restored when it's the only spilled register.
  return CodeGenerator::HasAllocatedCalleeSaveRegisters() || clobbered_ra_;
}

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::MipsCore(static_cast<int>(reg));
}

// TODO: mapping of floating-point registers to DWARF.

void CodeGeneratorMIPS::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();

  if (do_overflow_check) {
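    // Touch the stack just below the reserved overflow region (the load target is ZERO,
    // so only the memory access matters); a fault here acts as the implicit stack
    // overflow check, and RecordPcInfo() below records the stack map for that probe.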
    __ LoadFromOffset(kLoadWord,
                      ZERO,
                      SP,
                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
    RecordPcInfo(nullptr, 0);
  }

  if (HasEmptyFrame()) {
    CHECK_EQ(fpu_spill_mask_, 0u);
    CHECK_EQ(core_spill_mask_, 1u << RA);
    CHECK(!clobbered_ra_);
    return;
  }

  // Make sure the frame size isn't unreasonably large.
  if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
    LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
  }

  // Spill callee-saved registers.

  uint32_t ofs = GetFrameSize();
  __ IncreaseFrameSize(ofs);

  for (uint32_t mask = core_spill_mask_; mask != 0; ) {
    Register reg = static_cast<Register>(MostSignificantBit(mask));
    mask ^= 1u << reg;
    ofs -= kMipsWordSize;
    // The ZERO register is only included for alignment.
    if (reg != ZERO) {
      __ StoreToOffset(kStoreWord, reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  for (uint32_t mask = fpu_spill_mask_; mask != 0; ) {
    FRegister reg = static_cast<FRegister>(MostSignificantBit(mask));
    mask ^= 1u << reg;
    ofs -= kMipsDoublewordSize;
    __ StoreDToOffset(reg, SP, ofs);
    // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
  }

  // Save the current method if we need it. Note that we do not
  // do this in HCurrentMethod, as the instruction might have been removed
  // in the SSA graph.
  if (RequiresCurrentMethod()) {
    __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
  }

  if (GetGraph()->HasShouldDeoptimizeFlag()) {
    // Initialize should deoptimize flag to 0.
    __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
  }
}

void CodeGeneratorMIPS::GenerateFrameExit() {
  __ cfi().RememberState();

  if (!HasEmptyFrame()) {
    // Restore callee-saved registers.

    // For better instruction scheduling restore RA before other registers.
    uint32_t ofs = GetFrameSize();
    for (uint32_t mask = core_spill_mask_; mask != 0; ) {
      Register reg = static_cast<Register>(MostSignificantBit(mask));
      mask ^= 1u << reg;
      ofs -= kMipsWordSize;
      // The ZERO register is only included for alignment.
      if (reg != ZERO) {
        __ LoadFromOffset(kLoadWord, reg, SP, ofs);
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    for (uint32_t mask = fpu_spill_mask_; mask != 0; ) {
      FRegister reg = static_cast<FRegister>(MostSignificantBit(mask));
      mask ^= 1u << reg;
      ofs -= kMipsDoublewordSize;
      __ LoadDFromOffset(reg, SP, ofs);
      // TODO: __ cfi().Restore(DWARFReg(reg));
    }

    size_t frame_size = GetFrameSize();
    // Adjust the stack pointer in the delay slot if doing so doesn't break CFI.
    bool exchange = IsInt<16>(static_cast<int32_t>(frame_size));
    bool reordering = __ SetReorder(false);
    if (exchange) {
      __ Jr(RA);
      __ DecreaseFrameSize(frame_size);  // Single instruction in delay slot.
    } else {
      __ DecreaseFrameSize(frame_size);
      __ Jr(RA);
      __ Nop();  // In delay slot.
    }
    __ SetReorder(reordering);
  } else {
    __ Jr(RA);
    __ NopIfNoReordering();
  }

  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

1371 void CodeGeneratorMIPS::Bind(HBasicBlock* block) {
1372 __ Bind(GetLabelOf(block));
1373 }
1374
1375 void CodeGeneratorMIPS::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
1376 if (src.Equals(dst)) {
1377 return;
1378 }
1379
1380 if (src.IsConstant()) {
1381 MoveConstant(dst, src.GetConstant());
1382 } else {
1383 if (Primitive::Is64BitType(dst_type)) {
1384 Move64(dst, src);
1385 } else {
1386 Move32(dst, src);
1387 }
1388 }
1389 }
1390
1391 void CodeGeneratorMIPS::Move32(Location destination, Location source) {
1392 if (source.Equals(destination)) {
1393 return;
1394 }
1395
1396 if (destination.IsRegister()) {
1397 if (source.IsRegister()) {
1398 __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
1399 } else if (source.IsFpuRegister()) {
1400 __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
1401 } else {
1402 DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
1403 __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
1404 }
1405 } else if (destination.IsFpuRegister()) {
1406 if (source.IsRegister()) {
1407 __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
1408 } else if (source.IsFpuRegister()) {
1409 __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
1410 } else {
1411 DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
1412 __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
1413 }
1414 } else {
1415 DCHECK(destination.IsStackSlot()) << destination;
1416 if (source.IsRegister()) {
1417 __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
1418 } else if (source.IsFpuRegister()) {
1419 __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, destination.GetStackIndex());
1420 } else {
1421 DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
1422 __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
1423 __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
1424 }
1425 }
1426 }
1427
1428 void CodeGeneratorMIPS::Move64(Location destination, Location source) {
1429 if (source.Equals(destination)) {
1430 return;
1431 }
1432
1433 if (destination.IsRegisterPair()) {
1434 if (source.IsRegisterPair()) {
1435 __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
1436 __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
1437 } else if (source.IsFpuRegister()) {
1438 Register dst_high = destination.AsRegisterPairHigh<Register>();
1439 Register dst_low = destination.AsRegisterPairLow<Register>();
1440 FRegister src = source.AsFpuRegister<FRegister>();
1441 __ Mfc1(dst_low, src);
1442 __ MoveFromFpuHigh(dst_high, src);
1443 } else {
1444 DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
1445 int32_t off = source.GetStackIndex();
1446 Register r = destination.AsRegisterPairLow<Register>();
1447 __ LoadFromOffset(kLoadDoubleword, r, SP, off);
1448 }
1449 } else if (destination.IsFpuRegister()) {
1450 if (source.IsRegisterPair()) {
1451 FRegister dst = destination.AsFpuRegister<FRegister>();
1452 Register src_high = source.AsRegisterPairHigh<Register>();
1453 Register src_low = source.AsRegisterPairLow<Register>();
1454 __ Mtc1(src_low, dst);
1455 __ MoveToFpuHigh(src_high, dst);
1456 } else if (source.IsFpuRegister()) {
1457 __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
1458 } else {
1459 DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
1460 __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
1461 }
1462 } else {
1463 DCHECK(destination.IsDoubleStackSlot()) << destination;
1464 int32_t off = destination.GetStackIndex();
1465 if (source.IsRegisterPair()) {
1466 __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, off);
1467 } else if (source.IsFpuRegister()) {
1468 __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, off);
1469 } else {
1470 DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
1471 __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
1472 __ StoreToOffset(kStoreWord, TMP, SP, off);
1473 __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
1474 __ StoreToOffset(kStoreWord, TMP, SP, off + 4);
1475 }
1476 }
1477 }
1478
1479 void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
1480 if (c->IsIntConstant() || c->IsNullConstant()) {
1481 // Move 32 bit constant.
1482 int32_t value = GetInt32ValueOf(c);
1483 if (destination.IsRegister()) {
1484 Register dst = destination.AsRegister<Register>();
1485 __ LoadConst32(dst, value);
1486 } else {
1487 DCHECK(destination.IsStackSlot())
1488 << "Cannot move " << c->DebugName() << " to " << destination;
1489 __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
1490 }
1491 } else if (c->IsLongConstant()) {
1492 // Move 64 bit constant.
1493 int64_t value = GetInt64ValueOf(c);
1494 if (destination.IsRegisterPair()) {
1495 Register r_h = destination.AsRegisterPairHigh<Register>();
1496 Register r_l = destination.AsRegisterPairLow<Register>();
1497 __ LoadConst64(r_h, r_l, value);
1498 } else {
1499 DCHECK(destination.IsDoubleStackSlot())
1500 << "Cannot move " << c->DebugName() << " to " << destination;
1501 __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
1502 }
1503 } else if (c->IsFloatConstant()) {
1504 // Move 32 bit float constant.
1505 int32_t value = GetInt32ValueOf(c);
1506 if (destination.IsFpuRegister()) {
1507 __ LoadSConst32(destination.AsFpuRegister<FRegister>(), value, TMP);
1508 } else {
1509 DCHECK(destination.IsStackSlot())
1510 << "Cannot move " << c->DebugName() << " to " << destination;
1511 __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
1512 }
1513 } else {
1514 // Move 64 bit double constant.
1515 DCHECK(c->IsDoubleConstant()) << c->DebugName();
1516 int64_t value = GetInt64ValueOf(c);
1517 if (destination.IsFpuRegister()) {
1518 FRegister fd = destination.AsFpuRegister<FRegister>();
1519 __ LoadDConst64(fd, value, TMP);
1520 } else {
1521 DCHECK(destination.IsDoubleStackSlot())
1522 << "Cannot move " << c->DebugName() << " to " << destination;
1523 __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
1524 }
1525 }
1526 }
1527
1528 void CodeGeneratorMIPS::MoveConstant(Location destination, int32_t value) {
1529 DCHECK(destination.IsRegister());
1530 Register dst = destination.AsRegister<Register>();
1531 __ LoadConst32(dst, value);
1532 }
1533
1534 void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
1535 if (location.IsRegister()) {
1536 locations->AddTemp(location);
1537 } else if (location.IsRegisterPair()) {
1538 locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
1539 locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
1540 } else {
1541 UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
1542 }
1543 }
1544
1545 template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
1546 inline void CodeGeneratorMIPS::EmitPcRelativeLinkerPatches(
1547 const ArenaDeque<PcRelativePatchInfo>& infos,
1548 ArenaVector<LinkerPatch>* linker_patches) {
1549 for (const PcRelativePatchInfo& info : infos) {
1550 const DexFile& dex_file = info.target_dex_file;
1551 size_t offset_or_index = info.offset_or_index;
1552 DCHECK(info.high_label.IsBound());
1553 uint32_t high_offset = __ GetLabelLocation(&info.high_label);
1554 // On R2 we use HMipsComputeBaseMethodAddress and patch relative to
1555 // the assembler's base label used for PC-relative addressing.
1556 uint32_t pc_rel_offset = info.pc_rel_label.IsBound()
1557 ? __ GetLabelLocation(&info.pc_rel_label)
1558 : __ GetPcRelBaseLabelLocation();
1559 linker_patches->push_back(Factory(high_offset, &dex_file, pc_rel_offset, offset_or_index));
1560 }
1561 }
1562
1563 void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
1564 DCHECK(linker_patches->empty());
1565 size_t size =
1566 pc_relative_dex_cache_patches_.size() +
1567 pc_relative_string_patches_.size() +
1568 pc_relative_type_patches_.size() +
1569 type_bss_entry_patches_.size() +
1570 boot_image_string_patches_.size() +
1571 boot_image_type_patches_.size();
1572 linker_patches->reserve(size);
1573 EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
1574 linker_patches);
1575 if (!GetCompilerOptions().IsBootImage()) {
1576 DCHECK(pc_relative_type_patches_.empty());
1577 EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
1578 linker_patches);
1579 } else {
1580 EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
1581 linker_patches);
1582 EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
1583 linker_patches);
1584 }
1585 EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
1586 linker_patches);
1587 for (const auto& entry : boot_image_string_patches_) {
1588 const StringReference& target_string = entry.first;
1589 Literal* literal = entry.second;
1590 DCHECK(literal->GetLabel()->IsBound());
1591 uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
1592 linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
1593 target_string.dex_file,
1594 target_string.string_index.index_));
1595 }
1596 for (const auto& entry : boot_image_type_patches_) {
1597 const TypeReference& target_type = entry.first;
1598 Literal* literal = entry.second;
1599 DCHECK(literal->GetLabel()->IsBound());
1600 uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
1601 linker_patches->push_back(LinkerPatch::TypePatch(literal_offset,
1602 target_type.dex_file,
1603 target_type.type_index.index_));
1604 }
1605 DCHECK_EQ(size, linker_patches->size());
1606 }
1607
1608 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeStringPatch(
1609 const DexFile& dex_file, dex::StringIndex string_index) {
1610 return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
1611 }
1612
1613 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatch(
1614 const DexFile& dex_file, dex::TypeIndex type_index) {
1615 return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
1616 }
1617
1618 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewTypeBssEntryPatch(
1619 const DexFile& dex_file, dex::TypeIndex type_index) {
1620 return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
1621 }
1622
1623 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeDexCacheArrayPatch(
1624 const DexFile& dex_file, uint32_t element_offset) {
1625 return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
1626 }
1627
1628 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativePatch(
1629 const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
1630 patches->emplace_back(dex_file, offset_or_index);
1631 return &patches->back();
1632 }
1633
1634 Literal* CodeGeneratorMIPS::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
1635 return map->GetOrCreate(
1636 value,
1637 [this, value]() { return __ NewLiteral<uint32_t>(value); });
1638 }
1639
1640 Literal* CodeGeneratorMIPS::DeduplicateMethodLiteral(MethodReference target_method,
1641 MethodToLiteralMap* map) {
1642 return map->GetOrCreate(
1643 target_method,
1644 [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
1645 }
1646
1647 Literal* CodeGeneratorMIPS::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
1648 dex::StringIndex string_index) {
1649 return boot_image_string_patches_.GetOrCreate(
1650 StringReference(&dex_file, string_index),
1651 [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
1652 }
1653
1654 Literal* CodeGeneratorMIPS::DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
1655 dex::TypeIndex type_index) {
1656 return boot_image_type_patches_.GetOrCreate(
1657 TypeReference(&dex_file, type_index),
1658 [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
1659 }
1660
1661 Literal* CodeGeneratorMIPS::DeduplicateBootImageAddressLiteral(uint32_t address) {
1662 return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
1663 }
1664
1665 void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info,
1666 Register out,
1667 Register base) {
1668 if (GetInstructionSetFeatures().IsR6()) {
1669 DCHECK_EQ(base, ZERO);
1670 __ Bind(&info->high_label);
1671 __ Bind(&info->pc_rel_label);
1672 // Add the high half of a 32-bit offset to PC.
1673 __ Auipc(out, /* placeholder */ 0x1234);
1674 } else {
1675 // If base is ZERO, emit NAL to obtain the actual base.
1676 if (base == ZERO) {
1677 // Generate a dummy PC-relative call to obtain PC.
1678 __ Nal();
1679 }
1680 __ Bind(&info->high_label);
1681 __ Lui(out, /* placeholder */ 0x1234);
1682 // If we emitted the NAL, bind the pc_rel_label, otherwise base is a register holding
1683 // the HMipsComputeBaseMethodAddress which has its own label stored in MipsAssembler.
1684 if (base == ZERO) {
1685 __ Bind(&info->pc_rel_label);
1686 }
1687 // Add the high half of a 32-bit offset to PC.
1688 __ Addu(out, out, (base == ZERO) ? RA : base);
1689 }
1690 // The immediately following instruction will add the sign-extended low half of the 32-bit
1691 // offset to `out` (e.g. lw, jialc, addiu).
1692 }
1693
1694 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
1695 const DexFile& dex_file,
1696 dex::StringIndex dex_index,
1697 Handle<mirror::String> handle) {
1698 jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index),
1699 reinterpret_cast64<uint64_t>(handle.GetReference()));
1700 jit_string_patches_.emplace_back(dex_file, dex_index.index_);
1701 return &jit_string_patches_.back();
1702 }
1703
1704 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
1705 const DexFile& dex_file,
1706 dex::TypeIndex dex_index,
1707 Handle<mirror::Class> handle) {
1708 jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
1709 reinterpret_cast64<uint64_t>(handle.GetReference()));
1710 jit_class_patches_.emplace_back(dex_file, dex_index.index_);
1711 return &jit_class_patches_.back();
1712 }
1713
1714 void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code,
1715 const uint8_t* roots_data,
1716 const CodeGeneratorMIPS::JitPatchInfo& info,
1717 uint64_t index_in_table) const {
1718 uint32_t literal_offset = GetAssembler().GetLabelLocation(&info.high_label);
1719 uintptr_t address =
1720 reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
1721 uint32_t addr32 = dchecked_integral_cast<uint32_t>(address);
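// The sequence to patch was emitted as "lui reg, 0x1234" followed by "lw reg, 0x5678(reg)".
// Verify the little-endian placeholder encoding below before splicing in the real root address.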
1722 // lui reg, addr32_high
1723 DCHECK_EQ(code[literal_offset + 0], 0x34);
1724 DCHECK_EQ(code[literal_offset + 1], 0x12);
1725 DCHECK_EQ((code[literal_offset + 2] & 0xE0), 0x00);
1726 DCHECK_EQ(code[literal_offset + 3], 0x3C);
1727 // lw reg, reg, addr32_low
1728 DCHECK_EQ(code[literal_offset + 4], 0x78);
1729 DCHECK_EQ(code[literal_offset + 5], 0x56);
1730 DCHECK_EQ((code[literal_offset + 7] & 0xFC), 0x8C);
1731 addr32 += (addr32 & 0x8000) << 1; // Account for sign extension in "lw reg, reg, addr32_low".
1732 // lui reg, addr32_high
1733 code[literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16);
1734 code[literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24);
1735 // lw reg, reg, addr32_low
1736 code[literal_offset + 4] = static_cast<uint8_t>(addr32 >> 0);
1737 code[literal_offset + 5] = static_cast<uint8_t>(addr32 >> 8);
1738 }
1739
1740 void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
1741 for (const JitPatchInfo& info : jit_string_patches_) {
1742 const auto& it = jit_string_roots_.find(StringReference(&info.target_dex_file,
1743 dex::StringIndex(info.index)));
1744 DCHECK(it != jit_string_roots_.end());
1745 PatchJitRootUse(code, roots_data, info, it->second);
1746 }
1747 for (const JitPatchInfo& info : jit_class_patches_) {
1748 const auto& it = jit_class_roots_.find(TypeReference(&info.target_dex_file,
1749 dex::TypeIndex(info.index)));
1750 DCHECK(it != jit_class_roots_.end());
1751 PatchJitRootUse(code, roots_data, info, it->second);
1752 }
1753 }
1754
1755 void CodeGeneratorMIPS::MarkGCCard(Register object,
1756 Register value,
1757 bool value_can_be_null) {
1758 MipsLabel done;
1759 Register card = AT;
1760 Register temp = TMP;
1761 if (value_can_be_null) {
1762 __ Beqz(value, &done);
1763 }
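// The card for `object` lives at card_table_base + (object >> kCardShift). Storing the low
// byte of the base register marks it dirty without materializing a separate constant.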
1764 __ LoadFromOffset(kLoadWord,
1765 card,
1766 TR,
1767 Thread::CardTableOffset<kMipsPointerSize>().Int32Value());
1768 __ Srl(temp, object, gc::accounting::CardTable::kCardShift);
1769 __ Addu(temp, card, temp);
1770 __ Sb(card, temp, 0);
1771 if (value_can_be_null) {
1772 __ Bind(&done);
1773 }
1774 }
1775
1776 void CodeGeneratorMIPS::SetupBlockedRegisters() const {
1777 // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
1778 blocked_core_registers_[ZERO] = true;
1779 blocked_core_registers_[K0] = true;
1780 blocked_core_registers_[K1] = true;
1781 blocked_core_registers_[GP] = true;
1782 blocked_core_registers_[SP] = true;
1783 blocked_core_registers_[RA] = true;
1784
1785 // AT and TMP(T8) are used as temporary/scratch registers
1786 // (similar to how AT is used by MIPS assemblers).
1787 blocked_core_registers_[AT] = true;
1788 blocked_core_registers_[TMP] = true;
1789 blocked_fpu_registers_[FTMP] = true;
1790
1791 // Reserve suspend and thread registers.
1792 blocked_core_registers_[S0] = true;
1793 blocked_core_registers_[TR] = true;
1794
1795 // Reserve T9 for function calls.
1796 blocked_core_registers_[T9] = true;
1797
1798 // Reserve odd-numbered FPU registers.
1799 for (size_t i = 1; i < kNumberOfFRegisters; i += 2) {
1800 blocked_fpu_registers_[i] = true;
1801 }
1802
1803 if (GetGraph()->IsDebuggable()) {
1804 // Stubs do not save callee-save floating point registers. If the graph
1805 // is debuggable, we need to deal with these registers differently. For
1806 // now, just block them.
1807 for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
1808 blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
1809 }
1810 }
1811 }
1812
1813 size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
1814 __ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
1815 return kMipsWordSize;
1816 }
1817
1818 size_t CodeGeneratorMIPS::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
1819 __ LoadFromOffset(kLoadWord, Register(reg_id), SP, stack_index);
1820 return kMipsWordSize;
1821 }
1822
1823 size_t CodeGeneratorMIPS::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
1824 __ StoreDToOffset(FRegister(reg_id), SP, stack_index);
1825 return kMipsDoublewordSize;
1826 }
1827
1828 size_t CodeGeneratorMIPS::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
1829 __ LoadDFromOffset(FRegister(reg_id), SP, stack_index);
1830 return kMipsDoublewordSize;
1831 }
1832
1833 void CodeGeneratorMIPS::DumpCoreRegister(std::ostream& stream, int reg) const {
1834 stream << Register(reg);
1835 }
1836
1837 void CodeGeneratorMIPS::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
1838 stream << FRegister(reg);
1839 }
1840
1841 constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
1842
1843 void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
1844 HInstruction* instruction,
1845 uint32_t dex_pc,
1846 SlowPathCode* slow_path) {
1847 ValidateInvokeRuntime(entrypoint, instruction, slow_path);
1848 GenerateInvokeRuntime(GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value(),
1849 IsDirectEntrypoint(entrypoint));
1850 if (EntrypointRequiresStackMap(entrypoint)) {
1851 RecordPcInfo(instruction, dex_pc, slow_path);
1852 }
1853 }
1854
1855 void CodeGeneratorMIPS::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
1856 HInstruction* instruction,
1857 SlowPathCode* slow_path,
1858 bool direct) {
1859 ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
1860 GenerateInvokeRuntime(entry_point_offset, direct);
1861 }
1862
1863 void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool direct) {
1864 bool reordering = __ SetReorder(false);
1865 __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
1866 __ Jalr(T9);
1867 if (direct) {
1868 // Reserve argument space on the stack (for $a0-$a3) for
1869 // entrypoints that directly reference native implementations.
1870 // The called function may use this space to spill $a0-$a3.
1871 __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset); // Single instruction in delay slot.
1872 __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
1873 } else {
1874 __ Nop(); // In delay slot.
1875 }
1876 __ SetReorder(reordering);
1877 }
1878
1879 void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
1880 Register class_reg) {
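// Branch to the slow path unless the class status is at least kStatusInitialized.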
1881 __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
1882 __ LoadConst32(AT, mirror::Class::kStatusInitialized);
1883 __ Blt(TMP, AT, slow_path->GetEntryLabel());
1884 // Even if the initialized flag is set, we need to ensure consistent memory ordering.
1885 __ Sync(0);
1886 __ Bind(slow_path->GetExitLabel());
1887 }
1888
1889 void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
1890 __ Sync(0); // Only stype 0 is supported.
1891 }
1892
1893 void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
1894 HBasicBlock* successor) {
1895 SuspendCheckSlowPathMIPS* slow_path =
1896 new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor);
1897 codegen_->AddSlowPath(slow_path);
1898
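// Test the thread's 16-bit flags; any set flag (e.g. a pending suspend request) diverts
// execution to the slow path.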
1899 __ LoadFromOffset(kLoadUnsignedHalfword,
1900 TMP,
1901 TR,
1902 Thread::ThreadFlagsOffset<kMipsPointerSize>().Int32Value());
1903 if (successor == nullptr) {
1904 __ Bnez(TMP, slow_path->GetEntryLabel());
1905 __ Bind(slow_path->GetReturnLabel());
1906 } else {
1907 __ Beqz(TMP, codegen_->GetLabelOf(successor));
1908 __ B(slow_path->GetEntryLabel());
1909 // slow_path will return to GetLabelOf(successor).
1910 }
1911 }
1912
1913 InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
1914 CodeGeneratorMIPS* codegen)
1915 : InstructionCodeGenerator(graph, codegen),
1916 assembler_(codegen->GetAssembler()),
1917 codegen_(codegen) {}
1918
1919 void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
1920 DCHECK_EQ(instruction->InputCount(), 2U);
1921 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1922 Primitive::Type type = instruction->GetResultType();
1923 switch (type) {
1924 case Primitive::kPrimInt: {
1925 locations->SetInAt(0, Location::RequiresRegister());
1926 HInstruction* right = instruction->InputAt(1);
1927 bool can_use_imm = false;
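// andi/ori/xori zero-extend a 16-bit immediate, addiu sign-extends one, and subtraction
// is performed as addiu with the negated value; check which immediate form applies.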
1928 if (right->IsConstant()) {
1929 int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant());
1930 if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
1931 can_use_imm = IsUint<16>(imm);
1932 } else if (instruction->IsAdd()) {
1933 can_use_imm = IsInt<16>(imm);
1934 } else {
1935 DCHECK(instruction->IsSub());
1936 can_use_imm = IsInt<16>(-imm);
1937 }
1938 }
1939 if (can_use_imm)
1940 locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
1941 else
1942 locations->SetInAt(1, Location::RequiresRegister());
1943 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1944 break;
1945 }
1946
1947 case Primitive::kPrimLong: {
1948 locations->SetInAt(0, Location::RequiresRegister());
1949 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1950 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1951 break;
1952 }
1953
1954 case Primitive::kPrimFloat:
1955 case Primitive::kPrimDouble:
1956 DCHECK(instruction->IsAdd() || instruction->IsSub());
1957 locations->SetInAt(0, Location::RequiresFpuRegister());
1958 locations->SetInAt(1, Location::RequiresFpuRegister());
1959 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1960 break;
1961
1962 default:
1963 LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
1964 }
1965 }
1966
1967 void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
1968 Primitive::Type type = instruction->GetType();
1969 LocationSummary* locations = instruction->GetLocations();
1970
1971 switch (type) {
1972 case Primitive::kPrimInt: {
1973 Register dst = locations->Out().AsRegister<Register>();
1974 Register lhs = locations->InAt(0).AsRegister<Register>();
1975 Location rhs_location = locations->InAt(1);
1976
1977 Register rhs_reg = ZERO;
1978 int32_t rhs_imm = 0;
1979 bool use_imm = rhs_location.IsConstant();
1980 if (use_imm) {
1981 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
1982 } else {
1983 rhs_reg = rhs_location.AsRegister<Register>();
1984 }
1985
1986 if (instruction->IsAnd()) {
1987 if (use_imm)
1988 __ Andi(dst, lhs, rhs_imm);
1989 else
1990 __ And(dst, lhs, rhs_reg);
1991 } else if (instruction->IsOr()) {
1992 if (use_imm)
1993 __ Ori(dst, lhs, rhs_imm);
1994 else
1995 __ Or(dst, lhs, rhs_reg);
1996 } else if (instruction->IsXor()) {
1997 if (use_imm)
1998 __ Xori(dst, lhs, rhs_imm);
1999 else
2000 __ Xor(dst, lhs, rhs_reg);
2001 } else if (instruction->IsAdd()) {
2002 if (use_imm)
2003 __ Addiu(dst, lhs, rhs_imm);
2004 else
2005 __ Addu(dst, lhs, rhs_reg);
2006 } else {
2007 DCHECK(instruction->IsSub());
2008 if (use_imm)
2009 __ Addiu(dst, lhs, -rhs_imm);
2010 else
2011 __ Subu(dst, lhs, rhs_reg);
2012 }
2013 break;
2014 }
2015
2016 case Primitive::kPrimLong: {
2017 Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
2018 Register dst_low = locations->Out().AsRegisterPairLow<Register>();
2019 Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
2020 Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
2021 Location rhs_location = locations->InAt(1);
2022 bool use_imm = rhs_location.IsConstant();
2023 if (!use_imm) {
2024 Register rhs_high = rhs_location.AsRegisterPairHigh<Register>();
2025 Register rhs_low = rhs_location.AsRegisterPairLow<Register>();
2026 if (instruction->IsAnd()) {
2027 __ And(dst_low, lhs_low, rhs_low);
2028 __ And(dst_high, lhs_high, rhs_high);
2029 } else if (instruction->IsOr()) {
2030 __ Or(dst_low, lhs_low, rhs_low);
2031 __ Or(dst_high, lhs_high, rhs_high);
2032 } else if (instruction->IsXor()) {
2033 __ Xor(dst_low, lhs_low, rhs_low);
2034 __ Xor(dst_high, lhs_high, rhs_high);
2035 } else if (instruction->IsAdd()) {
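// 64-bit add: add the low words, derive the carry into TMP with a set-less-than,
// then fold the carry into the high word.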
2036 if (lhs_low == rhs_low) {
2037 // Special case for lhs = rhs and the sum potentially overwriting both lhs and rhs.
2038 __ Slt(TMP, lhs_low, ZERO);
2039 __ Addu(dst_low, lhs_low, rhs_low);
2040 } else {
2041 __ Addu(dst_low, lhs_low, rhs_low);
2042 // If the sum overwrites rhs, lhs remains unchanged, otherwise rhs remains unchanged.
2043 __ Sltu(TMP, dst_low, (dst_low == rhs_low) ? lhs_low : rhs_low);
2044 }
2045 __ Addu(dst_high, lhs_high, rhs_high);
2046 __ Addu(dst_high, dst_high, TMP);
2047 } else {
2048 DCHECK(instruction->IsSub());
2049 __ Sltu(TMP, lhs_low, rhs_low);
2050 __ Subu(dst_low, lhs_low, rhs_low);
2051 __ Subu(dst_high, lhs_high, rhs_high);
2052 __ Subu(dst_high, dst_high, TMP);
2053 }
2054 } else {
2055 int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
2056 if (instruction->IsOr()) {
2057 uint32_t low = Low32Bits(value);
2058 uint32_t high = High32Bits(value);
2059 if (IsUint<16>(low)) {
2060 if (dst_low != lhs_low || low != 0) {
2061 __ Ori(dst_low, lhs_low, low);
2062 }
2063 } else {
2064 __ LoadConst32(TMP, low);
2065 __ Or(dst_low, lhs_low, TMP);
2066 }
2067 if (IsUint<16>(high)) {
2068 if (dst_high != lhs_high || high != 0) {
2069 __ Ori(dst_high, lhs_high, high);
2070 }
2071 } else {
2072 if (high != low) {
2073 __ LoadConst32(TMP, high);
2074 }
2075 __ Or(dst_high, lhs_high, TMP);
2076 }
2077 } else if (instruction->IsXor()) {
2078 uint32_t low = Low32Bits(value);
2079 uint32_t high = High32Bits(value);
2080 if (IsUint<16>(low)) {
2081 if (dst_low != lhs_low || low != 0) {
2082 __ Xori(dst_low, lhs_low, low);
2083 }
2084 } else {
2085 __ LoadConst32(TMP, low);
2086 __ Xor(dst_low, lhs_low, TMP);
2087 }
2088 if (IsUint<16>(high)) {
2089 if (dst_high != lhs_high || high != 0) {
2090 __ Xori(dst_high, lhs_high, high);
2091 }
2092 } else {
2093 if (high != low) {
2094 __ LoadConst32(TMP, high);
2095 }
2096 __ Xor(dst_high, lhs_high, TMP);
2097 }
2098 } else if (instruction->IsAnd()) {
2099 uint32_t low = Low32Bits(value);
2100 uint32_t high = High32Bits(value);
2101 if (IsUint<16>(low)) {
2102 __ Andi(dst_low, lhs_low, low);
2103 } else if (low != 0xFFFFFFFF) {
2104 __ LoadConst32(TMP, low);
2105 __ And(dst_low, lhs_low, TMP);
2106 } else if (dst_low != lhs_low) {
2107 __ Move(dst_low, lhs_low);
2108 }
2109 if (IsUint<16>(high)) {
2110 __ Andi(dst_high, lhs_high, high);
2111 } else if (high != 0xFFFFFFFF) {
2112 if (high != low) {
2113 __ LoadConst32(TMP, high);
2114 }
2115 __ And(dst_high, lhs_high, TMP);
2116 } else if (dst_high != lhs_high) {
2117 __ Move(dst_high, lhs_high);
2118 }
2119 } else {
2120 if (instruction->IsSub()) {
2121 value = -value;
2122 } else {
2123 DCHECK(instruction->IsAdd());
2124 }
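// Add the (possibly negated) 64-bit constant piecewise: add the low words, capture the
// carry in AT, then add the high words plus the carry.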
2125 int32_t low = Low32Bits(value);
2126 int32_t high = High32Bits(value);
2127 if (IsInt<16>(low)) {
2128 if (dst_low != lhs_low || low != 0) {
2129 __ Addiu(dst_low, lhs_low, low);
2130 }
2131 if (low != 0) {
2132 __ Sltiu(AT, dst_low, low);
2133 }
2134 } else {
2135 __ LoadConst32(TMP, low);
2136 __ Addu(dst_low, lhs_low, TMP);
2137 __ Sltu(AT, dst_low, TMP);
2138 }
2139 if (IsInt<16>(high)) {
2140 if (dst_high != lhs_high || high != 0) {
2141 __ Addiu(dst_high, lhs_high, high);
2142 }
2143 } else {
2144 if (high != low) {
2145 __ LoadConst32(TMP, high);
2146 }
2147 __ Addu(dst_high, lhs_high, TMP);
2148 }
2149 if (low != 0) {
2150 __ Addu(dst_high, dst_high, AT);
2151 }
2152 }
2153 }
2154 break;
2155 }
2156
2157 case Primitive::kPrimFloat:
2158 case Primitive::kPrimDouble: {
2159 FRegister dst = locations->Out().AsFpuRegister<FRegister>();
2160 FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
2161 FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
2162 if (instruction->IsAdd()) {
2163 if (type == Primitive::kPrimFloat) {
2164 __ AddS(dst, lhs, rhs);
2165 } else {
2166 __ AddD(dst, lhs, rhs);
2167 }
2168 } else {
2169 DCHECK(instruction->IsSub());
2170 if (type == Primitive::kPrimFloat) {
2171 __ SubS(dst, lhs, rhs);
2172 } else {
2173 __ SubD(dst, lhs, rhs);
2174 }
2175 }
2176 break;
2177 }
2178
2179 default:
2180 LOG(FATAL) << "Unexpected binary operation type " << type;
2181 }
2182 }
2183
2184 void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
2185 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
2186
2187 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
2188 Primitive::Type type = instr->GetResultType();
2189 switch (type) {
2190 case Primitive::kPrimInt:
2191 locations->SetInAt(0, Location::RequiresRegister());
2192 locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
2193 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2194 break;
2195 case Primitive::kPrimLong:
2196 locations->SetInAt(0, Location::RequiresRegister());
2197 locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
2198 locations->SetOut(Location::RequiresRegister());
2199 break;
2200 default:
2201 LOG(FATAL) << "Unexpected shift type " << type;
2202 }
2203 }
2204
2205 static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
2206
2207 void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
2208 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
2209 LocationSummary* locations = instr->GetLocations();
2210 Primitive::Type type = instr->GetType();
2211
2212 Location rhs_location = locations->InAt(1);
2213 bool use_imm = rhs_location.IsConstant();
2214 Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>();
2215 int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
2216 const uint32_t shift_mask =
2217 (type == Primitive::kPrimInt) ? kMaxIntShiftDistance : kMaxLongShiftDistance;
2218 const uint32_t shift_value = rhs_imm & shift_mask;
2219 // Are the INS (Insert Bit Field) and ROTR instructions supported?
2220 bool has_ins_rotr = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
2221
2222 switch (type) {
2223 case Primitive::kPrimInt: {
2224 Register dst = locations->Out().AsRegister<Register>();
2225 Register lhs = locations->InAt(0).AsRegister<Register>();
2226 if (use_imm) {
2227 if (shift_value == 0) {
2228 if (dst != lhs) {
2229 __ Move(dst, lhs);
2230 }
2231 } else if (instr->IsShl()) {
2232 __ Sll(dst, lhs, shift_value);
2233 } else if (instr->IsShr()) {
2234 __ Sra(dst, lhs, shift_value);
2235 } else if (instr->IsUShr()) {
2236 __ Srl(dst, lhs, shift_value);
2237 } else {
2238 if (has_ins_rotr) {
2239 __ Rotr(dst, lhs, shift_value);
2240 } else {
2241 __ Sll(TMP, lhs, (kMipsBitsPerWord - shift_value) & shift_mask);
2242 __ Srl(dst, lhs, shift_value);
2243 __ Or(dst, dst, TMP);
2244 }
2245 }
2246 } else {
2247 if (instr->IsShl()) {
2248 __ Sllv(dst, lhs, rhs_reg);
2249 } else if (instr->IsShr()) {
2250 __ Srav(dst, lhs, rhs_reg);
2251 } else if (instr->IsUShr()) {
2252 __ Srlv(dst, lhs, rhs_reg);
2253 } else {
2254 if (has_ins_rotr) {
2255 __ Rotrv(dst, lhs, rhs_reg);
2256 } else {
2257 __ Subu(TMP, ZERO, rhs_reg);
2258 // 32-bit shift instructions use the 5 least significant bits of the shift count, so
2259 // shifting by `-rhs_reg` is equivalent to shifting by `(32 - rhs_reg) & 31`. The case
2260 // when `rhs_reg & 31 == 0` is OK even though we don't shift `lhs` left all the way out
2261 // by 32, because the result in this case is computed as `(lhs >> 0) | (lhs << 0)`,
2262 // IOW, the OR'd values are equal.
2263 __ Sllv(TMP, lhs, TMP);
2264 __ Srlv(dst, lhs, rhs_reg);
2265 __ Or(dst, dst, TMP);
2266 }
2267 }
2268 }
2269 break;
2270 }
2271
2272 case Primitive::kPrimLong: {
2273 Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
2274 Register dst_low = locations->Out().AsRegisterPairLow<Register>();
2275 Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
2276 Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
2277 if (use_imm) {
2278 if (shift_value == 0) {
2279 codegen_->Move64(locations->Out(), locations->InAt(0));
2280 } else if (shift_value < kMipsBitsPerWord) {
2281 if (has_ins_rotr) {
2282 if (instr->IsShl()) {
2283 __ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
2284 __ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value);
2285 __ Sll(dst_low, lhs_low, shift_value);
2286 } else if (instr->IsShr()) {
2287 __ Srl(dst_low, lhs_low, shift_value);
2288 __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
2289 __ Sra(dst_high, lhs_high, shift_value);
2290 } else if (instr->IsUShr()) {
2291 __ Srl(dst_low, lhs_low, shift_value);
2292 __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
2293 __ Srl(dst_high, lhs_high, shift_value);
2294 } else {
2295 __ Srl(dst_low, lhs_low, shift_value);
2296 __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
2297 __ Srl(dst_high, lhs_high, shift_value);
2298 __ Ins(dst_high, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
2299 }
2300 } else {
2301 if (instr->IsShl()) {
2302 __ Sll(dst_low, lhs_low, shift_value);
2303 __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
2304 __ Sll(dst_high, lhs_high, shift_value);
2305 __ Or(dst_high, dst_high, TMP);
2306 } else if (instr->IsShr()) {
2307 __ Sra(dst_high, lhs_high, shift_value);
2308 __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
2309 __ Srl(dst_low, lhs_low, shift_value);
2310 __ Or(dst_low, dst_low, TMP);
2311 } else if (instr->IsUShr()) {
2312 __ Srl(dst_high, lhs_high, shift_value);
2313 __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
2314 __ Srl(dst_low, lhs_low, shift_value);
2315 __ Or(dst_low, dst_low, TMP);
2316 } else {
2317 __ Srl(TMP, lhs_low, shift_value);
2318 __ Sll(dst_low, lhs_high, kMipsBitsPerWord - shift_value);
2319 __ Or(dst_low, dst_low, TMP);
2320 __ Srl(TMP, lhs_high, shift_value);
2321 __ Sll(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
2322 __ Or(dst_high, dst_high, TMP);
2323 }
2324 }
2325 } else {
2326 const uint32_t shift_value_high = shift_value - kMipsBitsPerWord;
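// The shift count is in [32, 63]: shifts take their result from the opposite word with
// zero or sign fill, while rotations reduce to a word swap plus a rotation by (count - 32).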
2327 if (instr->IsShl()) {
2328 __ Sll(dst_high, lhs_low, shift_value_high);
2329 __ Move(dst_low, ZERO);
2330 } else if (instr->IsShr()) {
2331 __ Sra(dst_low, lhs_high, shift_value_high);
2332 __ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
2333 } else if (instr->IsUShr()) {
2334 __ Srl(dst_low, lhs_high, shift_value_high);
2335 __ Move(dst_high, ZERO);
2336 } else {
2337 if (shift_value == kMipsBitsPerWord) {
2338 // 64-bit rotation by 32 is just a swap.
2339 __ Move(dst_low, lhs_high);
2340 __ Move(dst_high, lhs_low);
2341 } else {
2342 if (has_ins_rotr) {
2343 __ Srl(dst_low, lhs_high, shift_value_high);
2344 __ Ins(dst_low, lhs_low, kMipsBitsPerWord - shift_value_high, shift_value_high);
2345 __ Srl(dst_high, lhs_low, shift_value_high);
2346 __ Ins(dst_high, lhs_high, kMipsBitsPerWord - shift_value_high, shift_value_high);
2347 } else {
2348 __ Sll(TMP, lhs_low, kMipsBitsPerWord - shift_value_high);
2349 __ Srl(dst_low, lhs_high, shift_value_high);
2350 __ Or(dst_low, dst_low, TMP);
2351 __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value_high);
2352 __ Srl(dst_high, lhs_low, shift_value_high);
2353 __ Or(dst_high, dst_high, TMP);
2354 }
2355 }
2356 }
2357 }
2358 } else {
2359 MipsLabel done;
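// Variable shift count: form the result for counts in [0, 31] with the 32-bit shifters
// (using ~count to pick up the bits that cross the word boundary), then test bit 5 of the
// count and, if it is set, swap or refill the words to cover counts of 32 and above.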
2360 if (instr->IsShl()) {
2361 __ Sllv(dst_low, lhs_low, rhs_reg);
2362 __ Nor(AT, ZERO, rhs_reg);
2363 __ Srl(TMP, lhs_low, 1);
2364 __ Srlv(TMP, TMP, AT);
2365 __ Sllv(dst_high, lhs_high, rhs_reg);
2366 __ Or(dst_high, dst_high, TMP);
2367 __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
2368 __ Beqz(TMP, &done);
2369 __ Move(dst_high, dst_low);
2370 __ Move(dst_low, ZERO);
2371 } else if (instr->IsShr()) {
2372 __ Srav(dst_high, lhs_high, rhs_reg);
2373 __ Nor(AT, ZERO, rhs_reg);
2374 __ Sll(TMP, lhs_high, 1);
2375 __ Sllv(TMP, TMP, AT);
2376 __ Srlv(dst_low, lhs_low, rhs_reg);
2377 __ Or(dst_low, dst_low, TMP);
2378 __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
2379 __ Beqz(TMP, &done);
2380 __ Move(dst_low, dst_high);
2381 __ Sra(dst_high, dst_high, 31);
2382 } else if (instr->IsUShr()) {
2383 __ Srlv(dst_high, lhs_high, rhs_reg);
2384 __ Nor(AT, ZERO, rhs_reg);
2385 __ Sll(TMP, lhs_high, 1);
2386 __ Sllv(TMP, TMP, AT);
2387 __ Srlv(dst_low, lhs_low, rhs_reg);
2388 __ Or(dst_low, dst_low, TMP);
2389 __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
2390 __ Beqz(TMP, &done);
2391 __ Move(dst_low, dst_high);
2392 __ Move(dst_high, ZERO);
2393 } else {
2394 __ Nor(AT, ZERO, rhs_reg);
2395 __ Srlv(TMP, lhs_low, rhs_reg);
2396 __ Sll(dst_low, lhs_high, 1);
2397 __ Sllv(dst_low, dst_low, AT);
2398 __ Or(dst_low, dst_low, TMP);
2399 __ Srlv(TMP, lhs_high, rhs_reg);
2400 __ Sll(dst_high, lhs_low, 1);
2401 __ Sllv(dst_high, dst_high, AT);
2402 __ Or(dst_high, dst_high, TMP);
2403 __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
2404 __ Beqz(TMP, &done);
2405 __ Move(TMP, dst_high);
2406 __ Move(dst_high, dst_low);
2407 __ Move(dst_low, TMP);
2408 }
2409 __ Bind(&done);
2410 }
2411 break;
2412 }
2413
2414 default:
2415 LOG(FATAL) << "Unexpected shift operation type " << type;
2416 }
2417 }
2418
2419 void LocationsBuilderMIPS::VisitAdd(HAdd* instruction) {
2420 HandleBinaryOp(instruction);
2421 }
2422
2423 void InstructionCodeGeneratorMIPS::VisitAdd(HAdd* instruction) {
2424 HandleBinaryOp(instruction);
2425 }
2426
2427 void LocationsBuilderMIPS::VisitAnd(HAnd* instruction) {
2428 HandleBinaryOp(instruction);
2429 }
2430
2431 void InstructionCodeGeneratorMIPS::VisitAnd(HAnd* instruction) {
2432 HandleBinaryOp(instruction);
2433 }
2434
2435 void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
2436 Primitive::Type type = instruction->GetType();
2437 bool object_array_get_with_read_barrier =
2438 kEmitCompilerReadBarrier && (type == Primitive::kPrimNot);
2439 LocationSummary* locations =
2440 new (GetGraph()->GetArena()) LocationSummary(instruction,
2441 object_array_get_with_read_barrier
2442 ? LocationSummary::kCallOnSlowPath
2443 : LocationSummary::kNoCall);
2444 locations->SetInAt(0, Location::RequiresRegister());
2445 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
2446 if (Primitive::IsFloatingPointType(type)) {
2447 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2448 } else {
2449 // The output overlaps in the case of an object array get with
2450 // read barriers enabled: we do not want the move to overwrite the
2451 // array's location, as we need it to emit the read barrier.
2452 locations->SetOut(Location::RequiresRegister(),
2453 object_array_get_with_read_barrier
2454 ? Location::kOutputOverlap
2455 : Location::kNoOutputOverlap);
2456 }
2457 // We need a temporary register for the read barrier marking slow
2458 // path in CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier.
2459 if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
2460 locations->AddTemp(Location::RequiresRegister());
2461 }
2462 }
2463
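// Returns a callback that records an implicit null check for `instruction`; the assembler's
// load/store helpers invoke it while emitting the access so the faulting PC gets a stack map.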
2464 static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS* codegen) {
2465 auto null_checker = [codegen, instruction]() {
2466 codegen->MaybeRecordImplicitNullCheck(instruction);
2467 };
2468 return null_checker;
2469 }
2470
2471 void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
2472 LocationSummary* locations = instruction->GetLocations();
2473 Location obj_loc = locations->InAt(0);
2474 Register obj = obj_loc.AsRegister<Register>();
2475 Location out_loc = locations->Out();
2476 Location index = locations->InAt(1);
2477 uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
2478 auto null_checker = GetImplicitNullChecker(instruction, codegen_);
2479
2480 Primitive::Type type = instruction->GetType();
2481 const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
2482 instruction->IsStringCharAt();
2483 switch (type) {
2484 case Primitive::kPrimBoolean: {
2485 Register out = out_loc.AsRegister<Register>();
2486 if (index.IsConstant()) {
2487 size_t offset =
2488 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
2489 __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset, null_checker);
2490 } else {
2491 __ Addu(TMP, obj, index.AsRegister<Register>());
2492 __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset, null_checker);
2493 }
2494 break;
2495 }
2496
2497 case Primitive::kPrimByte: {
2498 Register out = out_loc.AsRegister<Register>();
2499 if (index.IsConstant()) {
2500 size_t offset =
2501 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
2502 __ LoadFromOffset(kLoadSignedByte, out, obj, offset, null_checker);
2503 } else {
2504 __ Addu(TMP, obj, index.AsRegister<Register>());
2505 __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset, null_checker);
2506 }
2507 break;
2508 }
2509
2510 case Primitive::kPrimShort: {
2511 Register out = out_loc.AsRegister<Register>();
2512 if (index.IsConstant()) {
2513 size_t offset =
2514 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
2515 __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
2516 } else {
2517 __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_2, TMP);
2518 __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
2519 }
2520 break;
2521 }
2522
2523 case Primitive::kPrimChar: {
2524 Register out = out_loc.AsRegister<Register>();
2525 if (maybe_compressed_char_at) {
2526 uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
2527 __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
2528 __ Sll(TMP, TMP, 31); // Extract compression flag into the most significant bit of TMP.
2529 static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
2530 "Expecting 0=compressed, 1=uncompressed");
2531 }
2532 if (index.IsConstant()) {
2533 int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
2534 if (maybe_compressed_char_at) {
2535 MipsLabel uncompressed_load, done;
2536 __ Bnez(TMP, &uncompressed_load);
2537 __ LoadFromOffset(kLoadUnsignedByte,
2538 out,
2539 obj,
2540 data_offset + (const_index << TIMES_1));
2541 __ B(&done);
2542 __ Bind(&uncompressed_load);
2543 __ LoadFromOffset(kLoadUnsignedHalfword,
2544 out,
2545 obj,
2546 data_offset + (const_index << TIMES_2));
2547 __ Bind(&done);
2548 } else {
2549 __ LoadFromOffset(kLoadUnsignedHalfword,
2550 out,
2551 obj,
2552 data_offset + (const_index << TIMES_2),
2553 null_checker);
2554 }
2555 } else {
2556 Register index_reg = index.AsRegister<Register>();
2557 if (maybe_compressed_char_at) {
2558 MipsLabel uncompressed_load, done;
2559 __ Bnez(TMP, &uncompressed_load);
2560 __ Addu(TMP, obj, index_reg);
2561 __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
2562 __ B(&done);
2563 __ Bind(&uncompressed_load);
2564 __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
2565 __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
2566 __ Bind(&done);
2567 } else {
2568 __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
2569 __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
2570 }
2571 }
2572 break;
2573 }
2574
2575 case Primitive::kPrimInt: {
2576 DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
2577 Register out = out_loc.AsRegister<Register>();
2578 if (index.IsConstant()) {
2579 size_t offset =
2580 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
2581 __ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
2582 } else {
2583 __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
2584 __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
2585 }
2586 break;
2587 }
2588
2589 case Primitive::kPrimNot: {
2590 static_assert(
2591 sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
2592 "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
2593 // /* HeapReference<Object> */ out =
2594 // *(obj + data_offset + index * sizeof(HeapReference<Object>))
2595 if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
2596 Location temp = locations->GetTemp(0);
2597 // Note that a potential implicit null check is handled in this
2598 // CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier call.
2599 codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
2600 out_loc,
2601 obj,
2602 data_offset,
2603 index,
2604 temp,
2605 /* needs_null_check */ true);
2606 } else {
2607 Register out = out_loc.AsRegister<Register>();
2608 if (index.IsConstant()) {
2609 size_t offset =
2610 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
2611 __ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
2612 // If read barriers are enabled, emit read barriers other than
2613 // Baker's using a slow path (and also unpoison the loaded
2614 // reference, if heap poisoning is enabled).
2615 codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
2616 } else {
2617 __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
2618 __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
2619 // If read barriers are enabled, emit read barriers other than
2620 // Baker's using a slow path (and also unpoison the loaded
2621 // reference, if heap poisoning is enabled).
2622 codegen_->MaybeGenerateReadBarrierSlow(instruction,
2623 out_loc,
2624 out_loc,
2625 obj_loc,
2626 data_offset,
2627 index);
2628 }
2629 }
2630 break;
2631 }
2632
2633 case Primitive::kPrimLong: {
2634 Register out = out_loc.AsRegisterPairLow<Register>();
2635 if (index.IsConstant()) {
2636 size_t offset =
2637 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
2638 __ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
2639 } else {
2640 __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
2641 __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
2642 }
2643 break;
2644 }
2645
2646 case Primitive::kPrimFloat: {
2647 FRegister out = out_loc.AsFpuRegister<FRegister>();
2648 if (index.IsConstant()) {
2649 size_t offset =
2650 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
2651 __ LoadSFromOffset(out, obj, offset, null_checker);
2652 } else {
2653 __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
2654 __ LoadSFromOffset(out, TMP, data_offset, null_checker);
2655 }
2656 break;
2657 }
2658
2659 case Primitive::kPrimDouble: {
2660 FRegister out = out_loc.AsFpuRegister<FRegister>();
2661 if (index.IsConstant()) {
2662 size_t offset =
2663 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
2664 __ LoadDFromOffset(out, obj, offset, null_checker);
2665 } else {
2666 __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
2667 __ LoadDFromOffset(out, TMP, data_offset, null_checker);
2668 }
2669 break;
2670 }
2671
2672 case Primitive::kPrimVoid:
2673 LOG(FATAL) << "Unreachable type " << instruction->GetType();
2674 UNREACHABLE();
2675 }
2676 }
2677
2678 void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
2679 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2680 locations->SetInAt(0, Location::RequiresRegister());
2681 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2682 }
2683
2684 void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
2685 LocationSummary* locations = instruction->GetLocations();
2686 uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
2687 Register obj = locations->InAt(0).AsRegister<Register>();
2688 Register out = locations->Out().AsRegister<Register>();
2689 __ LoadFromOffset(kLoadWord, out, obj, offset);
2690 codegen_->MaybeRecordImplicitNullCheck(instruction);
2691 // Mask out compression flag from String's array length.
2692 if (mirror::kUseStringCompression && instruction->IsStringLength()) {
2693 __ Srl(out, out, 1u);
2694 }
2695 }
2696
2697 Location LocationsBuilderMIPS::RegisterOrZeroConstant(HInstruction* instruction) {
2698 return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern())
2699 ? Location::ConstantLocation(instruction->AsConstant())
2700 : Location::RequiresRegister();
2701 }
2702
2703 Location LocationsBuilderMIPS::FpuRegisterOrConstantForStore(HInstruction* instruction) {
2704 // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register.
2705 // We can store a non-zero float or double constant without first loading it into the FPU,
2706 // but we should only prefer this if the constant has a single use.
2707 if (instruction->IsConstant() &&
2708 (instruction->AsConstant()->IsZeroBitPattern() ||
2709 instruction->GetUses().HasExactlyOneElement())) {
2710 return Location::ConstantLocation(instruction->AsConstant());
2711 // Otherwise fall through and require an FPU register for the constant.
2712 }
2713 return Location::RequiresFpuRegister();
2714 }
2715
2716 void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
2717 Primitive::Type value_type = instruction->GetComponentType();
2718
2719 bool needs_write_barrier =
2720 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
2721 bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
2722
2723 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
2724 instruction,
2725 may_need_runtime_call_for_type_check ?
2726 LocationSummary::kCallOnSlowPath :
2727 LocationSummary::kNoCall);
2728
2729 locations->SetInAt(0, Location::RequiresRegister());
2730 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
2731 if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
2732 locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
2733 } else {
2734 locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
2735 }
2736 if (needs_write_barrier) {
2737 // Temporary register for the write barrier.
2738 locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
2739 }
2740 }
2741
2742 void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
2743 LocationSummary* locations = instruction->GetLocations();
2744 Register obj = locations->InAt(0).AsRegister<Register>();
2745 Location index = locations->InAt(1);
2746 Location value_location = locations->InAt(2);
2747 Primitive::Type value_type = instruction->GetComponentType();
2748 bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
2749 bool needs_write_barrier =
2750 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
2751 auto null_checker = GetImplicitNullChecker(instruction, codegen_);
2752 Register base_reg = index.IsConstant() ? obj : TMP;
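// A constant index is folded into the store offset below, so the array object
// itself is the base; otherwise TMP is set to obj + index * element_size
// (via Addu or ShiftAndAdd) and used as the base.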
2753
2754 switch (value_type) {
2755 case Primitive::kPrimBoolean:
2756 case Primitive::kPrimByte: {
2757 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
2758 if (index.IsConstant()) {
2759 data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1;
2760 } else {
2761 __ Addu(base_reg, obj, index.AsRegister<Register>());
2762 }
2763 if (value_location.IsConstant()) {
2764 int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2765 __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker);
2766 } else {
2767 Register value = value_location.AsRegister<Register>();
2768 __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker);
2769 }
2770 break;
2771 }
2772
2773 case Primitive::kPrimShort:
2774 case Primitive::kPrimChar: {
2775 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
2776 if (index.IsConstant()) {
2777 data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
2778 } else {
2779 __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_2, base_reg);
2780 }
2781 if (value_location.IsConstant()) {
2782 int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2783 __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker);
2784 } else {
2785 Register value = value_location.AsRegister<Register>();
2786 __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker);
2787 }
2788 break;
2789 }
2790
2791 case Primitive::kPrimInt: {
2792 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
2793 if (index.IsConstant()) {
2794 data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2795 } else {
2796 __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
2797 }
2798 if (value_location.IsConstant()) {
2799 int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2800 __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
2801 } else {
2802 Register value = value_location.AsRegister<Register>();
2803 __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
2804 }
2805 break;
2806 }
2807
2808 case Primitive::kPrimNot: {
2809 if (value_location.IsConstant()) {
2810 // Just setting null.
2811 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
2812 if (index.IsConstant()) {
2813 data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2814 } else {
2815 __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
2816 }
2817 int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2818 DCHECK_EQ(value, 0);
2819 __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
2820 DCHECK(!needs_write_barrier);
2821 DCHECK(!may_need_runtime_call_for_type_check);
2822 break;
2823 }
2824
2825 DCHECK(needs_write_barrier);
2826 Register value = value_location.AsRegister<Register>();
2827 Register temp1 = locations->GetTemp(0).AsRegister<Register>();
2828 Register temp2 = TMP; // Doesn't need to survive slow path.
2829 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2830 uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
2831 uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
2832 MipsLabel done;
2833 SlowPathCodeMIPS* slow_path = nullptr;
2834
2835 if (may_need_runtime_call_for_type_check) {
2836 slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS(instruction);
2837 codegen_->AddSlowPath(slow_path);
2838 if (instruction->GetValueCanBeNull()) {
2839 MipsLabel non_zero;
2840 __ Bnez(value, &non_zero);
2841 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
2842 if (index.IsConstant()) {
2843 data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2844 } else {
2845 __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
2846 }
2847 __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
2848 __ B(&done);
2849 __ Bind(&non_zero);
2850 }
2851
2852 // Note that when read barriers are enabled, the type checks
2853 // are performed without read barriers. This is fine, even in
2854 // the case where a class object is in the from-space after
2855 // the flip, as a comparison involving such a type would not
2856 // produce a false positive; it may of course produce a false
2857 // negative, in which case we would take the ArraySet slow
2858 // path.
2859
2860 // /* HeapReference<Class> */ temp1 = obj->klass_
2861 __ LoadFromOffset(kLoadWord, temp1, obj, class_offset, null_checker);
2862 __ MaybeUnpoisonHeapReference(temp1);
2863
2864 // /* HeapReference<Class> */ temp1 = temp1->component_type_
2865 __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
2866 // /* HeapReference<Class> */ temp2 = value->klass_
2867 __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
2868 // If heap poisoning is enabled, no need to unpoison `temp1`
2869 // nor `temp2`, as we are comparing two poisoned references.
2870
2871 if (instruction->StaticTypeOfArrayIsObjectArray()) {
2872 MipsLabel do_put;
2873 __ Beq(temp1, temp2, &do_put);
2874 // If heap poisoning is enabled, the `temp1` reference has
2875 // not been unpoisoned yet; unpoison it now.
2876 __ MaybeUnpoisonHeapReference(temp1);
2877
2878 // /* HeapReference<Class> */ temp1 = temp1->super_class_
2879 __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
2880 // If heap poisoning is enabled, no need to unpoison
2881 // `temp1`, as we are comparing against null below.
2882 __ Bnez(temp1, slow_path->GetEntryLabel());
2883 __ Bind(&do_put);
2884 } else {
2885 __ Bne(temp1, temp2, slow_path->GetEntryLabel());
2886 }
2887 }
2888
2889 Register source = value;
2890 if (kPoisonHeapReferences) {
2891 // Note that in the case where `value` is a null reference,
2892 // we do not enter this block, as a null reference does not
2893 // need poisoning.
2894 __ Move(temp1, value);
2895 __ PoisonHeapReference(temp1);
2896 source = temp1;
2897 }
2898
2899 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
2900 if (index.IsConstant()) {
2901 data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2902 } else {
2903 __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
2904 }
2905 __ StoreToOffset(kStoreWord, source, base_reg, data_offset);
2906
2907 if (!may_need_runtime_call_for_type_check) {
2908 codegen_->MaybeRecordImplicitNullCheck(instruction);
2909 }
2910
2911 codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
2912
2913 if (done.IsLinked()) {
2914 __ Bind(&done);
2915 }
2916
2917 if (slow_path != nullptr) {
2918 __ Bind(slow_path->GetExitLabel());
2919 }
2920 break;
2921 }
2922
2923 case Primitive::kPrimLong: {
2924 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
2925 if (index.IsConstant()) {
2926 data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
2927 } else {
2928 __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
2929 }
2930 if (value_location.IsConstant()) {
2931 int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
2932 __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
2933 } else {
2934 Register value = value_location.AsRegisterPairLow<Register>();
2935 __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
2936 }
2937 break;
2938 }
2939
2940 case Primitive::kPrimFloat: {
2941 uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
2942 if (index.IsConstant()) {
2943 data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
2944 } else {
2945 __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
2946 }
2947 if (value_location.IsConstant()) {
2948 int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
2949 __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
2950 } else {
2951 FRegister value = value_location.AsFpuRegister<FRegister>();
2952 __ StoreSToOffset(value, base_reg, data_offset, null_checker);
2953 }
2954 break;
2955 }
2956
2957 case Primitive::kPrimDouble: {
2958 uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
2959 if (index.IsConstant()) {
2960 data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
2961 } else {
2962 __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
2963 }
2964 if (value_location.IsConstant()) {
2965 int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
2966 __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
2967 } else {
2968 FRegister value = value_location.AsFpuRegister<FRegister>();
2969 __ StoreDToOffset(value, base_reg, data_offset, null_checker);
2970 }
2971 break;
2972 }
2973
2974 case Primitive::kPrimVoid:
2975 LOG(FATAL) << "Unreachable type " << instruction->GetType();
2976 UNREACHABLE();
2977 }
2978 }
2979
2980 void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
2981 RegisterSet caller_saves = RegisterSet::Empty();
2982 InvokeRuntimeCallingConvention calling_convention;
2983 caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2984 caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2985 LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
2986 locations->SetInAt(0, Location::RequiresRegister());
2987 locations->SetInAt(1, Location::RequiresRegister());
2988 }
2989
2990 void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
2991 LocationSummary* locations = instruction->GetLocations();
2992 BoundsCheckSlowPathMIPS* slow_path =
2993 new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction);
2994 codegen_->AddSlowPath(slow_path);
2995
2996 Register index = locations->InAt(0).AsRegister<Register>();
2997 Register length = locations->InAt(1).AsRegister<Register>();
2998
2999 // length is limited by the maximum positive signed 32-bit integer.
3000 // Unsigned comparison of length and index checks for index < 0
3001 // and for length <= index simultaneously.
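// For example, index == -1 is 0xFFFFFFFF as an unsigned value, which is never
// below a valid length, so the single Bgeu below rejects it as well.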
3002 __ Bgeu(index, length, slow_path->GetEntryLabel());
3003 }
3004
3005 // Temp is used for read barrier.
3006 static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
3007 if (kEmitCompilerReadBarrier &&
3008 (kUseBakerReadBarrier ||
3009 type_check_kind == TypeCheckKind::kAbstractClassCheck ||
3010 type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
3011 type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
3012 return 1;
3013 }
3014 return 0;
3015 }
3016
3017 // Extra temp is used for read barrier.
3018 static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
3019 return 1 + NumberOfInstanceOfTemps(type_check_kind);
3020 }
3021
3022 void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
3023 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
3024 bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
3025
3026 TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
3027 switch (type_check_kind) {
3028 case TypeCheckKind::kExactCheck:
3029 case TypeCheckKind::kAbstractClassCheck:
3030 case TypeCheckKind::kClassHierarchyCheck:
3031 case TypeCheckKind::kArrayObjectCheck:
3032 call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
3033 ? LocationSummary::kCallOnSlowPath
3034 : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
3035 break;
3036 case TypeCheckKind::kArrayCheck:
3037 case TypeCheckKind::kUnresolvedCheck:
3038 case TypeCheckKind::kInterfaceCheck:
3039 call_kind = LocationSummary::kCallOnSlowPath;
3040 break;
3041 }
3042
3043 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
3044 locations->SetInAt(0, Location::RequiresRegister());
3045 locations->SetInAt(1, Location::RequiresRegister());
3046 locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
3047 }
3048
3049 void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
3050 TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
3051 LocationSummary* locations = instruction->GetLocations();
3052 Location obj_loc = locations->InAt(0);
3053 Register obj = obj_loc.AsRegister<Register>();
3054 Register cls = locations->InAt(1).AsRegister<Register>();
3055 Location temp_loc = locations->GetTemp(0);
3056 Register temp = temp_loc.AsRegister<Register>();
3057 const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
3058 DCHECK_LE(num_temps, 2u);
3059 Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
3060 const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
3061 const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
3062 const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
3063 const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
3064 const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
3065 const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
3066 const uint32_t object_array_data_offset =
3067 mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
3068 MipsLabel done;
3069
3070 // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
3071 // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
3072 // read barriers is done for performance and code size reasons.
3073 bool is_type_check_slow_path_fatal = false;
3074 if (!kEmitCompilerReadBarrier) {
3075 is_type_check_slow_path_fatal =
3076 (type_check_kind == TypeCheckKind::kExactCheck ||
3077 type_check_kind == TypeCheckKind::kAbstractClassCheck ||
3078 type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
3079 type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
3080 !instruction->CanThrowIntoCatchBlock();
3081 }
3082 SlowPathCodeMIPS* slow_path =
3083 new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
3084 is_type_check_slow_path_fatal);
3085 codegen_->AddSlowPath(slow_path);
3086
3087 // Avoid this check if we know `obj` is not null.
3088 if (instruction->MustDoNullCheck()) {
3089 __ Beqz(obj, &done);
3090 }
3091
3092 switch (type_check_kind) {
3093 case TypeCheckKind::kExactCheck:
3094 case TypeCheckKind::kArrayCheck: {
3095 // /* HeapReference<Class> */ temp = obj->klass_
3096 GenerateReferenceLoadTwoRegisters(instruction,
3097 temp_loc,
3098 obj_loc,
3099 class_offset,
3100 maybe_temp2_loc,
3101 kWithoutReadBarrier);
3102 // Jump to slow path for throwing the exception or doing a
3103 // more involved array check.
3104 __ Bne(temp, cls, slow_path->GetEntryLabel());
3105 break;
3106 }
3107
3108 case TypeCheckKind::kAbstractClassCheck: {
3109 // /* HeapReference<Class> */ temp = obj->klass_
3110 GenerateReferenceLoadTwoRegisters(instruction,
3111 temp_loc,
3112 obj_loc,
3113 class_offset,
3114 maybe_temp2_loc,
3115 kWithoutReadBarrier);
3116 // If the class is abstract, we eagerly fetch the super class of the
3117 // object to avoid doing a comparison we know will fail.
3118 MipsLabel loop;
3119 __ Bind(&loop);
3120 // /* HeapReference<Class> */ temp = temp->super_class_
3121 GenerateReferenceLoadOneRegister(instruction,
3122 temp_loc,
3123 super_offset,
3124 maybe_temp2_loc,
3125 kWithoutReadBarrier);
3126 // If the class reference currently in `temp` is null, jump to the slow path to throw the
3127 // exception.
3128 __ Beqz(temp, slow_path->GetEntryLabel());
3129 // Otherwise, compare the classes.
3130 __ Bne(temp, cls, &loop);
3131 break;
3132 }
3133
3134 case TypeCheckKind::kClassHierarchyCheck: {
3135 // /* HeapReference<Class> */ temp = obj->klass_
3136 GenerateReferenceLoadTwoRegisters(instruction,
3137 temp_loc,
3138 obj_loc,
3139 class_offset,
3140 maybe_temp2_loc,
3141 kWithoutReadBarrier);
3142 // Walk over the class hierarchy to find a match.
3143 MipsLabel loop;
3144 __ Bind(&loop);
3145 __ Beq(temp, cls, &done);
3146 // /* HeapReference<Class> */ temp = temp->super_class_
3147 GenerateReferenceLoadOneRegister(instruction,
3148 temp_loc,
3149 super_offset,
3150 maybe_temp2_loc,
3151 kWithoutReadBarrier);
3152 // If the class reference currently in `temp` is null, jump to the slow path to throw the
3153 // exception. Otherwise, jump to the beginning of the loop.
3154 __ Bnez(temp, &loop);
3155 __ B(slow_path->GetEntryLabel());
3156 break;
3157 }
3158
3159 case TypeCheckKind::kArrayObjectCheck: {
3160 // /* HeapReference<Class> */ temp = obj->klass_
3161 GenerateReferenceLoadTwoRegisters(instruction,
3162 temp_loc,
3163 obj_loc,
3164 class_offset,
3165 maybe_temp2_loc,
3166 kWithoutReadBarrier);
3167 // Do an exact check.
3168 __ Beq(temp, cls, &done);
3169 // Otherwise, we need to check that the object's class is a non-primitive array.
3170 // /* HeapReference<Class> */ temp = temp->component_type_
3171 GenerateReferenceLoadOneRegister(instruction,
3172 temp_loc,
3173 component_offset,
3174 maybe_temp2_loc,
3175 kWithoutReadBarrier);
3176 // If the component type is null, jump to the slow path to throw the exception.
3177 __ Beqz(temp, slow_path->GetEntryLabel());
3178 // Otherwise, the object is indeed an array, further check that this component
3179 // type is not a primitive type.
3180 __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
3181 static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
3182 __ Bnez(temp, slow_path->GetEntryLabel());
3183 break;
3184 }
3185
3186 case TypeCheckKind::kUnresolvedCheck:
3187 // We always go into the type check slow path for the unresolved check case.
3188 // We cannot directly call the CheckCast runtime entry point
3189 // without resorting to a type checking slow path here (i.e. by
3190 // calling InvokeRuntime directly), as it would require assigning
3191 // fixed registers to the inputs of this HCheckCast
3192 // instruction (following the runtime calling convention), which
3193 // might be cluttered by the potential first read barrier
3194 // emission at the beginning of this method.
3195 __ B(slow_path->GetEntryLabel());
3196 break;
3197
3198 case TypeCheckKind::kInterfaceCheck: {
3199 // Avoid read barriers to improve the performance of the fast path. Doing so cannot
3200 // produce false positives.
3201 // /* HeapReference<Class> */ temp = obj->klass_
3202 GenerateReferenceLoadTwoRegisters(instruction,
3203 temp_loc,
3204 obj_loc,
3205 class_offset,
3206 maybe_temp2_loc,
3207 kWithoutReadBarrier);
3208 // /* HeapReference<Class> */ temp = temp->iftable_
3209 GenerateReferenceLoadTwoRegisters(instruction,
3210 temp_loc,
3211 temp_loc,
3212 iftable_offset,
3213 maybe_temp2_loc,
3214 kWithoutReadBarrier);
3215 // Iftable is never null.
3216 __ Lw(TMP, temp, array_length_offset);
3217 // Loop through the iftable and check if any class matches.
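// Each iftable entry is an (interface class, method array) pair, hence the cursor
// advances by 2 * kHeapReferenceSize and the remaining element count in TMP is
// decremented by 2 per iteration.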
3218 MipsLabel loop;
3219 __ Bind(&loop);
3220 __ Addiu(temp, temp, 2 * kHeapReferenceSize); // Possibly in delay slot on R2.
3221 __ Beqz(TMP, slow_path->GetEntryLabel());
3222 __ Lw(AT, temp, object_array_data_offset - 2 * kHeapReferenceSize);
3223 __ MaybeUnpoisonHeapReference(AT);
3224 // Go to next interface.
3225 __ Addiu(TMP, TMP, -2);
3226 // Compare the classes and continue the loop if they do not match.
3227 __ Bne(AT, cls, &loop);
3228 break;
3229 }
3230 }
3231
3232 __ Bind(&done);
3233 __ Bind(slow_path->GetExitLabel());
3234 }
3235
3236 void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
3237 LocationSummary* locations =
3238 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
3239 locations->SetInAt(0, Location::RequiresRegister());
3240 if (check->HasUses()) {
3241 locations->SetOut(Location::SameAsFirstInput());
3242 }
3243 }
3244
3245 void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
3246 // We assume the class is not null.
3247 SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
3248 check->GetLoadClass(),
3249 check,
3250 check->GetDexPc(),
3251 true);
3252 codegen_->AddSlowPath(slow_path);
3253 GenerateClassInitializationCheck(slow_path,
3254 check->GetLocations()->InAt(0).AsRegister<Register>());
3255 }
3256
3257 void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
3258 Primitive::Type in_type = compare->InputAt(0)->GetType();
3259
3260 LocationSummary* locations =
3261 new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
3262
3263 switch (in_type) {
3264 case Primitive::kPrimBoolean:
3265 case Primitive::kPrimByte:
3266 case Primitive::kPrimShort:
3267 case Primitive::kPrimChar:
3268 case Primitive::kPrimInt:
3269 locations->SetInAt(0, Location::RequiresRegister());
3270 locations->SetInAt(1, Location::RequiresRegister());
3271 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3272 break;
3273
3274 case Primitive::kPrimLong:
3275 locations->SetInAt(0, Location::RequiresRegister());
3276 locations->SetInAt(1, Location::RequiresRegister());
3277 // Output overlaps because it is written before doing the low comparison.
3278 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
3279 break;
3280
3281 case Primitive::kPrimFloat:
3282 case Primitive::kPrimDouble:
3283 locations->SetInAt(0, Location::RequiresFpuRegister());
3284 locations->SetInAt(1, Location::RequiresFpuRegister());
3285 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3286 break;
3287
3288 default:
3289 LOG(FATAL) << "Unexpected type for compare operation " << in_type;
3290 }
3291 }
3292
3293 void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) {
3294 LocationSummary* locations = instruction->GetLocations();
3295 Register res = locations->Out().AsRegister<Register>();
3296 Primitive::Type in_type = instruction->InputAt(0)->GetType();
3297 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
3298
3299 // 0 if: left == right
3300 // 1 if: left > right
3301 // -1 if: left < right
3302 switch (in_type) {
3303 case Primitive::kPrimBoolean:
3304 case Primitive::kPrimByte:
3305 case Primitive::kPrimShort:
3306 case Primitive::kPrimChar:
3307 case Primitive::kPrimInt: {
3308 Register lhs = locations->InAt(0).AsRegister<Register>();
3309 Register rhs = locations->InAt(1).AsRegister<Register>();
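// Compute sign(lhs - rhs) without branches:
// e.g. lhs < rhs gives TMP = 1, res = 0, so res - TMP = -1; equal inputs give 0;
// lhs > rhs gives 1.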
3310 __ Slt(TMP, lhs, rhs);
3311 __ Slt(res, rhs, lhs);
3312 __ Subu(res, res, TMP);
3313 break;
3314 }
3315 case Primitive::kPrimLong: {
3316 MipsLabel done;
3317 Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
3318 Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
3319 Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
3320 Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
3321 // TODO: more efficient (direct) comparison with a constant.
3322 __ Slt(TMP, lhs_high, rhs_high);
3323 __ Slt(AT, rhs_high, lhs_high); // Inverted: is actually gt.
3324 __ Subu(res, AT, TMP); // Result -1:1:0 for [ <, >, == ].
3325 __ Bnez(res, &done); // If we compared ==, check if lower bits are also equal.
3326 __ Sltu(TMP, lhs_low, rhs_low);
3327 __ Sltu(AT, rhs_low, lhs_low); // Inverted: is actually gt.
3328 __ Subu(res, AT, TMP); // Result -1:1:0 for [ <, >, == ].
3329 __ Bind(&done);
3330 break;
3331 }
3332
3333 case Primitive::kPrimFloat: {
3334 bool gt_bias = instruction->IsGtBias();
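// gt_bias selects the result for unordered (NaN) operands: every comparison below
// fails on NaN, so the fall-through value is 1 with gt_bias and -1 without it,
// matching Java's fcmpg/fcmpl semantics. The double case below works the same way.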
3335 FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
3336 FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
3337 MipsLabel done;
3338 if (isR6) {
3339 __ CmpEqS(FTMP, lhs, rhs);
3340 __ LoadConst32(res, 0);
3341 __ Bc1nez(FTMP, &done);
3342 if (gt_bias) {
3343 __ CmpLtS(FTMP, lhs, rhs);
3344 __ LoadConst32(res, -1);
3345 __ Bc1nez(FTMP, &done);
3346 __ LoadConst32(res, 1);
3347 } else {
3348 __ CmpLtS(FTMP, rhs, lhs);
3349 __ LoadConst32(res, 1);
3350 __ Bc1nez(FTMP, &done);
3351 __ LoadConst32(res, -1);
3352 }
3353 } else {
3354 if (gt_bias) {
3355 __ ColtS(0, lhs, rhs);
3356 __ LoadConst32(res, -1);
3357 __ Bc1t(0, &done);
3358 __ CeqS(0, lhs, rhs);
3359 __ LoadConst32(res, 1);
3360 __ Movt(res, ZERO, 0);
3361 } else {
3362 __ ColtS(0, rhs, lhs);
3363 __ LoadConst32(res, 1);
3364 __ Bc1t(0, &done);
3365 __ CeqS(0, lhs, rhs);
3366 __ LoadConst32(res, -1);
3367 __ Movt(res, ZERO, 0);
3368 }
3369 }
3370 __ Bind(&done);
3371 break;
3372 }
3373 case Primitive::kPrimDouble: {
3374 bool gt_bias = instruction->IsGtBias();
3375 FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
3376 FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
3377 MipsLabel done;
3378 if (isR6) {
3379 __ CmpEqD(FTMP, lhs, rhs);
3380 __ LoadConst32(res, 0);
3381 __ Bc1nez(FTMP, &done);
3382 if (gt_bias) {
3383 __ CmpLtD(FTMP, lhs, rhs);
3384 __ LoadConst32(res, -1);
3385 __ Bc1nez(FTMP, &done);
3386 __ LoadConst32(res, 1);
3387 } else {
3388 __ CmpLtD(FTMP, rhs, lhs);
3389 __ LoadConst32(res, 1);
3390 __ Bc1nez(FTMP, &done);
3391 __ LoadConst32(res, -1);
3392 }
3393 } else {
3394 if (gt_bias) {
3395 __ ColtD(0, lhs, rhs);
3396 __ LoadConst32(res, -1);
3397 __ Bc1t(0, &done);
3398 __ CeqD(0, lhs, rhs);
3399 __ LoadConst32(res, 1);
3400 __ Movt(res, ZERO, 0);
3401 } else {
3402 __ ColtD(0, rhs, lhs);
3403 __ LoadConst32(res, 1);
3404 __ Bc1t(0, &done);
3405 __ CeqD(0, lhs, rhs);
3406 __ LoadConst32(res, -1);
3407 __ Movt(res, ZERO, 0);
3408 }
3409 }
3410 __ Bind(&done);
3411 break;
3412 }
3413
3414 default:
3415 LOG(FATAL) << "Unimplemented compare type " << in_type;
3416 }
3417 }
3418
3419 void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) {
3420 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3421 switch (instruction->InputAt(0)->GetType()) {
3422 default:
3423 case Primitive::kPrimLong:
3424 locations->SetInAt(0, Location::RequiresRegister());
3425 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3426 break;
3427
3428 case Primitive::kPrimFloat:
3429 case Primitive::kPrimDouble:
3430 locations->SetInAt(0, Location::RequiresFpuRegister());
3431 locations->SetInAt(1, Location::RequiresFpuRegister());
3432 break;
3433 }
3434 if (!instruction->IsEmittedAtUseSite()) {
3435 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3436 }
3437 }
3438
3439 void InstructionCodeGeneratorMIPS::HandleCondition(HCondition* instruction) {
3440 if (instruction->IsEmittedAtUseSite()) {
3441 return;
3442 }
3443
3444 Primitive::Type type = instruction->InputAt(0)->GetType();
3445 LocationSummary* locations = instruction->GetLocations();
3446 Register dst = locations->Out().AsRegister<Register>();
3447 MipsLabel true_label;
3448
3449 switch (type) {
3450 default:
3451 // Integer case.
3452 GenerateIntCompare(instruction->GetCondition(), locations);
3453 return;
3454
3455 case Primitive::kPrimLong:
3456 // TODO: don't use branches.
3457 GenerateLongCompareAndBranch(instruction->GetCondition(), locations, &true_label);
3458 break;
3459
3460 case Primitive::kPrimFloat:
3461 case Primitive::kPrimDouble:
3462 GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
3463 return;
3464 }
3465
3466 // Convert the branches into the result.
3467 MipsLabel done;
3468
3469 // False case: result = 0.
3470 __ LoadConst32(dst, 0);
3471 __ B(&done);
3472
3473 // True case: result = 1.
3474 __ Bind(&true_label);
3475 __ LoadConst32(dst, 1);
3476 __ Bind(&done);
3477 }
3478
3479 void InstructionCodeGeneratorMIPS::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
3480 DCHECK(instruction->IsDiv() || instruction->IsRem());
3481 DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt);
3482
3483 LocationSummary* locations = instruction->GetLocations();
3484 Location second = locations->InAt(1);
3485 DCHECK(second.IsConstant());
3486
3487 Register out = locations->Out().AsRegister<Register>();
3488 Register dividend = locations->InAt(0).AsRegister<Register>();
3489 int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
3490 DCHECK(imm == 1 || imm == -1);
3491
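// x % +/-1 is always 0, x / 1 is x, and x / -1 is -x. The wrapping Subu below also
// yields Integer.MIN_VALUE for Integer.MIN_VALUE / -1, as Java semantics require.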
3492 if (instruction->IsRem()) {
3493 __ Move(out, ZERO);
3494 } else {
3495 if (imm == -1) {
3496 __ Subu(out, ZERO, dividend);
3497 } else if (out != dividend) {
3498 __ Move(out, dividend);
3499 }
3500 }
3501 }
3502
3503 void InstructionCodeGeneratorMIPS::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
3504 DCHECK(instruction->IsDiv() || instruction->IsRem());
3505 DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt);
3506
3507 LocationSummary* locations = instruction->GetLocations();
3508 Location second = locations->InAt(1);
3509 DCHECK(second.IsConstant());
3510
3511 Register out = locations->Out().AsRegister<Register>();
3512 Register dividend = locations->InAt(0).AsRegister<Register>();
3513 int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
3514 uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
3515 int ctz_imm = CTZ(abs_imm);
3516
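// An arithmetic shift alone would round toward negative infinity, so for negative
// dividends a bias of (2^ctz_imm - 1), derived from the sign bits in TMP, is added
// first to obtain the round-toward-zero result Java requires.
// E.g. -7 / 4: bias = 3, (-7 + 3) >> 2 = -1.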
3517 if (instruction->IsDiv()) {
3518 if (ctz_imm == 1) {
3519 // Fast path for division by +/-2, which is very common.
3520 __ Srl(TMP, dividend, 31);
3521 } else {
3522 __ Sra(TMP, dividend, 31);
3523 __ Srl(TMP, TMP, 32 - ctz_imm);
3524 }
3525 __ Addu(out, dividend, TMP);
3526 __ Sra(out, out, ctz_imm);
3527 if (imm < 0) {
3528 __ Subu(out, ZERO, out);
3529 }
3530 } else {
3531 if (ctz_imm == 1) {
3532 // Fast path for modulo +/-2, which is very common.
3533 __ Sra(TMP, dividend, 31);
3534 __ Subu(out, dividend, TMP);
3535 __ Andi(out, out, 1);
3536 __ Addu(out, out, TMP);
3537 } else {
3538 __ Sra(TMP, dividend, 31);
3539 __ Srl(TMP, TMP, 32 - ctz_imm);
3540 __ Addu(out, dividend, TMP);
3541 if (IsUint<16>(abs_imm - 1)) {
3542 __ Andi(out, out, abs_imm - 1);
3543 } else {
3544 __ Sll(out, out, 32 - ctz_imm);
3545 __ Srl(out, out, 32 - ctz_imm);
3546 }
3547 __ Subu(out, out, TMP);
3548 }
3549 }
3550 }
3551
3552 void InstructionCodeGeneratorMIPS::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
3553 DCHECK(instruction->IsDiv() || instruction->IsRem());
3554 DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt);
3555
3556 LocationSummary* locations = instruction->GetLocations();
3557 Location second = locations->InAt(1);
3558 DCHECK(second.IsConstant());
3559
3560 Register out = locations->Out().AsRegister<Register>();
3561 Register dividend = locations->InAt(0).AsRegister<Register>();
3562 int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
3563
3564 int64_t magic;
3565 int shift;
3566 CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
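// The quotient is approximated as the high 32 bits of dividend * magic, corrected by
// adding or subtracting the dividend when the signs of divisor and magic differ,
// arithmetically shifted, and then adjusted by the sign bit to round toward zero.
// For a remainder, the quotient is multiplied back and subtracted from the dividend.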
3567
3568 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
3569
3570 __ LoadConst32(TMP, magic);
3571 if (isR6) {
3572 __ MuhR6(TMP, dividend, TMP);
3573 } else {
3574 __ MultR2(dividend, TMP);
3575 __ Mfhi(TMP);
3576 }
3577 if (imm > 0 && magic < 0) {
3578 __ Addu(TMP, TMP, dividend);
3579 } else if (imm < 0 && magic > 0) {
3580 __ Subu(TMP, TMP, dividend);
3581 }
3582
3583 if (shift != 0) {
3584 __ Sra(TMP, TMP, shift);
3585 }
3586
3587 if (instruction->IsDiv()) {
3588 __ Sra(out, TMP, 31);
3589 __ Subu(out, TMP, out);
3590 } else {
3591 __ Sra(AT, TMP, 31);
3592 __ Subu(AT, TMP, AT);
3593 __ LoadConst32(TMP, imm);
3594 if (isR6) {
3595 __ MulR6(TMP, AT, TMP);
3596 } else {
3597 __ MulR2(TMP, AT, TMP);
3598 }
3599 __ Subu(out, dividend, TMP);
3600 }
3601 }
3602
3603 void InstructionCodeGeneratorMIPS::GenerateDivRemIntegral(HBinaryOperation* instruction) {
3604 DCHECK(instruction->IsDiv() || instruction->IsRem());
3605 DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt);
3606
3607 LocationSummary* locations = instruction->GetLocations();
3608 Register out = locations->Out().AsRegister<Register>();
3609 Location second = locations->InAt(1);
3610
3611 if (second.IsConstant()) {
3612 int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
3613 if (imm == 0) {
3614 // Do not generate anything. DivZeroCheck would prevent any code from being executed.
3615 } else if (imm == 1 || imm == -1) {
3616 DivRemOneOrMinusOne(instruction);
3617 } else if (IsPowerOfTwo(AbsOrMin(imm))) {
3618 DivRemByPowerOfTwo(instruction);
3619 } else {
3620 DCHECK(imm <= -2 || imm >= 2);
3621 GenerateDivRemWithAnyConstant(instruction);
3622 }
3623 } else {
3624 Register dividend = locations->InAt(0).AsRegister<Register>();
3625 Register divisor = second.AsRegister<Register>();
3626 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
3627 if (instruction->IsDiv()) {
3628 if (isR6) {
3629 __ DivR6(out, dividend, divisor);
3630 } else {
3631 __ DivR2(out, dividend, divisor);
3632 }
3633 } else {
3634 if (isR6) {
3635 __ ModR6(out, dividend, divisor);
3636 } else {
3637 __ ModR2(out, dividend, divisor);
3638 }
3639 }
3640 }
3641 }
3642
3643 void LocationsBuilderMIPS::VisitDiv(HDiv* div) {
3644 Primitive::Type type = div->GetResultType();
3645 LocationSummary::CallKind call_kind = (type == Primitive::kPrimLong)
3646 ? LocationSummary::kCallOnMainOnly
3647 : LocationSummary::kNoCall;
3648
3649 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
3650
3651 switch (type) {
3652 case Primitive::kPrimInt:
3653 locations->SetInAt(0, Location::RequiresRegister());
3654 locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
3655 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3656 break;
3657
3658 case Primitive::kPrimLong: {
3659 InvokeRuntimeCallingConvention calling_convention;
3660 locations->SetInAt(0, Location::RegisterPairLocation(
3661 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
3662 locations->SetInAt(1, Location::RegisterPairLocation(
3663 calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
3664 locations->SetOut(calling_convention.GetReturnLocation(type));
3665 break;
3666 }
3667
3668 case Primitive::kPrimFloat:
3669 case Primitive::kPrimDouble:
3670 locations->SetInAt(0, Location::RequiresFpuRegister());
3671 locations->SetInAt(1, Location::RequiresFpuRegister());
3672 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3673 break;
3674
3675 default:
3676 LOG(FATAL) << "Unexpected div type " << type;
3677 }
3678 }
3679
3680 void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
3681 Primitive::Type type = instruction->GetType();
3682 LocationSummary* locations = instruction->GetLocations();
3683
3684 switch (type) {
3685 case Primitive::kPrimInt:
3686 GenerateDivRemIntegral(instruction);
3687 break;
3688 case Primitive::kPrimLong: {
3689 codegen_->InvokeRuntime(kQuickLdiv, instruction, instruction->GetDexPc());
3690 CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
3691 break;
3692 }
3693 case Primitive::kPrimFloat:
3694 case Primitive::kPrimDouble: {
3695 FRegister dst = locations->Out().AsFpuRegister<FRegister>();
3696 FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
3697 FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
3698 if (type == Primitive::kPrimFloat) {
3699 __ DivS(dst, lhs, rhs);
3700 } else {
3701 __ DivD(dst, lhs, rhs);
3702 }
3703 break;
3704 }
3705 default:
3706 LOG(FATAL) << "Unexpected div type " << type;
3707 }
3708 }
3709
3710 void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
3711 LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
3712 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
3713 }
3714
3715 void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
3716 SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction);
3717 codegen_->AddSlowPath(slow_path);
3718 Location value = instruction->GetLocations()->InAt(0);
3719 Primitive::Type type = instruction->GetType();
3720
3721 switch (type) {
3722 case Primitive::kPrimBoolean:
3723 case Primitive::kPrimByte:
3724 case Primitive::kPrimChar:
3725 case Primitive::kPrimShort:
3726 case Primitive::kPrimInt: {
3727 if (value.IsConstant()) {
3728 if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
3729 __ B(slow_path->GetEntryLabel());
3730 } else {
3731 // A division by a non-zero constant is valid. We don't need to perform
3732 // any check, so simply fall through.
3733 }
3734 } else {
3735 DCHECK(value.IsRegister()) << value;
3736 __ Beqz(value.AsRegister<Register>(), slow_path->GetEntryLabel());
3737 }
3738 break;
3739 }
3740 case Primitive::kPrimLong: {
3741 if (value.IsConstant()) {
3742 if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
3743 __ B(slow_path->GetEntryLabel());
3744 } else {
3745 // A division by a non-zero constant is valid. We don't need to perform
3746 // any check, so simply fall through.
3747 }
3748 } else {
3749 DCHECK(value.IsRegisterPair()) << value;
3750 __ Or(TMP, value.AsRegisterPairHigh<Register>(), value.AsRegisterPairLow<Register>());
3751 __ Beqz(TMP, slow_path->GetEntryLabel());
3752 }
3753 break;
3754 }
3755 default:
3756 LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
3757 }
3758 }
3759
3760 void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
3761 LocationSummary* locations =
3762 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
3763 locations->SetOut(Location::ConstantLocation(constant));
3764 }
3765
3766 void InstructionCodeGeneratorMIPS::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
3767 // Will be generated at use site.
3768 }
3769
3770 void LocationsBuilderMIPS::VisitExit(HExit* exit) {
3771 exit->SetLocations(nullptr);
3772 }
3773
3774 void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
3775 }
3776
3777 void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
3778 LocationSummary* locations =
3779 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
3780 locations->SetOut(Location::ConstantLocation(constant));
3781 }
3782
3783 void InstructionCodeGeneratorMIPS::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
3784 // Will be generated at use site.
3785 }
3786
3787 void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
3788 got->SetLocations(nullptr);
3789 }
3790
3791 void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
3792 DCHECK(!successor->IsExitBlock());
3793 HBasicBlock* block = got->GetBlock();
3794 HInstruction* previous = got->GetPrevious();
3795 HLoopInformation* info = block->GetLoopInformation();
3796
3797 if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
3798 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
3799 GenerateSuspendCheck(info->GetSuspendCheck(), successor);
3800 return;
3801 }
3802 if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
3803 GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
3804 }
3805 if (!codegen_->GoesToNextBlock(block, successor)) {
3806 __ B(codegen_->GetLabelOf(successor));
3807 }
3808 }
3809
3810 void InstructionCodeGeneratorMIPS::VisitGoto(HGoto* got) {
3811 HandleGoto(got, got->GetSuccessor());
3812 }
3813
3814 void LocationsBuilderMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
3815 try_boundary->SetLocations(nullptr);
3816 }
3817
3818 void InstructionCodeGeneratorMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
3819 HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
3820 if (!successor->IsExitBlock()) {
3821 HandleGoto(try_boundary, successor);
3822 }
3823 }
3824
3825 void InstructionCodeGeneratorMIPS::GenerateIntCompare(IfCondition cond,
3826 LocationSummary* locations) {
3827 Register dst = locations->Out().AsRegister<Register>();
3828 Register lhs = locations->InAt(0).AsRegister<Register>();
3829 Location rhs_location = locations->InAt(1);
3830 Register rhs_reg = ZERO;
3831 int64_t rhs_imm = 0;
3832 bool use_imm = rhs_location.IsConstant();
3833 if (use_imm) {
3834 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
3835 } else {
3836 rhs_reg = rhs_location.AsRegister<Register>();
3837 }
3838
3839 switch (cond) {
3840 case kCondEQ:
3841 case kCondNE:
3842 if (use_imm && IsInt<16>(-rhs_imm)) {
3843 if (rhs_imm == 0) {
3844 if (cond == kCondEQ) {
3845 __ Sltiu(dst, lhs, 1);
3846 } else {
3847 __ Sltu(dst, ZERO, lhs);
3848 }
3849 } else {
3850 __ Addiu(dst, lhs, -rhs_imm);
3851 if (cond == kCondEQ) {
3852 __ Sltiu(dst, dst, 1);
3853 } else {
3854 __ Sltu(dst, ZERO, dst);
3855 }
3856 }
3857 } else {
3858 if (use_imm && IsUint<16>(rhs_imm)) {
3859 __ Xori(dst, lhs, rhs_imm);
3860 } else {
3861 if (use_imm) {
3862 rhs_reg = TMP;
3863 __ LoadConst32(rhs_reg, rhs_imm);
3864 }
3865 __ Xor(dst, lhs, rhs_reg);
3866 }
3867 if (cond == kCondEQ) {
3868 __ Sltiu(dst, dst, 1);
3869 } else {
3870 __ Sltu(dst, ZERO, dst);
3871 }
3872 }
3873 break;
3874
3875 case kCondLT:
3876 case kCondGE:
3877 if (use_imm && IsInt<16>(rhs_imm)) {
3878 __ Slti(dst, lhs, rhs_imm);
3879 } else {
3880 if (use_imm) {
3881 rhs_reg = TMP;
3882 __ LoadConst32(rhs_reg, rhs_imm);
3883 }
3884 __ Slt(dst, lhs, rhs_reg);
3885 }
3886 if (cond == kCondGE) {
3887 // Simulate lhs >= rhs via !(lhs < rhs) since there's
3888 // only the slt instruction but no sge.
3889 __ Xori(dst, dst, 1);
3890 }
3891 break;
3892
3893 case kCondLE:
3894 case kCondGT:
3895 if (use_imm && IsInt<16>(rhs_imm + 1)) {
3896 // Simulate lhs <= rhs via lhs < rhs + 1.
3897 __ Slti(dst, lhs, rhs_imm + 1);
3898 if (cond == kCondGT) {
3899 // Simulate lhs > rhs via !(lhs <= rhs) since there's
3900 // only the slti instruction but no sgti.
3901 __ Xori(dst, dst, 1);
3902 }
3903 } else {
3904 if (use_imm) {
3905 rhs_reg = TMP;
3906 __ LoadConst32(rhs_reg, rhs_imm);
3907 }
3908 __ Slt(dst, rhs_reg, lhs);
3909 if (cond == kCondLE) {
3910 // Simulate lhs <= rhs via !(rhs < lhs) since there's
3911 // only the slt instruction but no sle.
3912 __ Xori(dst, dst, 1);
3913 }
3914 }
3915 break;
3916
3917 case kCondB:
3918 case kCondAE:
3919 if (use_imm && IsInt<16>(rhs_imm)) {
3920 // Sltiu sign-extends its 16-bit immediate operand before
3921 // the comparison and thus lets us compare directly with
3922 // unsigned values in the ranges [0, 0x7fff] and
3923 // [0xffff8000, 0xffffffff].
3924 __ Sltiu(dst, lhs, rhs_imm);
3925 } else {
3926 if (use_imm) {
3927 rhs_reg = TMP;
3928 __ LoadConst32(rhs_reg, rhs_imm);
3929 }
3930 __ Sltu(dst, lhs, rhs_reg);
3931 }
3932 if (cond == kCondAE) {
3933 // Simulate lhs >= rhs via !(lhs < rhs) since there's
3934 // only the sltu instruction but no sgeu.
3935 __ Xori(dst, dst, 1);
3936 }
3937 break;
3938
3939 case kCondBE:
3940 case kCondA:
3941 if (use_imm && (rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
3942 // Simulate lhs <= rhs via lhs < rhs + 1.
3943 // Note that this only works if rhs + 1 does not overflow
3944 // to 0, hence the check above.
3945 // Sltiu sign-extends its 16-bit immediate operand before
3946 // the comparison and thus lets us compare directly with
3947 // unsigned values in the ranges [0, 0x7fff] and
3948 // [0xffff8000, 0xffffffff].
3949 __ Sltiu(dst, lhs, rhs_imm + 1);
3950 if (cond == kCondA) {
3951 // Simulate lhs > rhs via !(lhs <= rhs) since there's
3952 // only the sltiu instruction but no sgtiu.
3953 __ Xori(dst, dst, 1);
3954 }
3955 } else {
3956 if (use_imm) {
3957 rhs_reg = TMP;
3958 __ LoadConst32(rhs_reg, rhs_imm);
3959 }
3960 __ Sltu(dst, rhs_reg, lhs);
3961 if (cond == kCondBE) {
3962 // Simulate lhs <= rhs via !(rhs < lhs) since there's
3963 // only the sltu instruction but no sleu.
3964 __ Xori(dst, dst, 1);
3965 }
3966 }
3967 break;
3968 }
3969 }
3970
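// Materializes the comparison into `dst` and returns true when the condition holds
// iff `dst` is zero (so the caller must test the inverted value), and false when it
// holds iff `dst` is non-zero.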
3971 bool InstructionCodeGeneratorMIPS::MaterializeIntCompare(IfCondition cond,
3972 LocationSummary* input_locations,
3973 Register dst) {
3974 Register lhs = input_locations->InAt(0).AsRegister<Register>();
3975 Location rhs_location = input_locations->InAt(1);
3976 Register rhs_reg = ZERO;
3977 int64_t rhs_imm = 0;
3978 bool use_imm = rhs_location.IsConstant();
3979 if (use_imm) {
3980 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
3981 } else {
3982 rhs_reg = rhs_location.AsRegister<Register>();
3983 }
3984
3985 switch (cond) {
3986 case kCondEQ:
3987 case kCondNE:
3988 if (use_imm && IsInt<16>(-rhs_imm)) {
3989 __ Addiu(dst, lhs, -rhs_imm);
3990 } else if (use_imm && IsUint<16>(rhs_imm)) {
3991 __ Xori(dst, lhs, rhs_imm);
3992 } else {
3993 if (use_imm) {
3994 rhs_reg = TMP;
3995 __ LoadConst32(rhs_reg, rhs_imm);
3996 }
3997 __ Xor(dst, lhs, rhs_reg);
3998 }
3999 return (cond == kCondEQ);
4000
4001 case kCondLT:
4002 case kCondGE:
4003 if (use_imm && IsInt<16>(rhs_imm)) {
4004 __ Slti(dst, lhs, rhs_imm);
4005 } else {
4006 if (use_imm) {
4007 rhs_reg = TMP;
4008 __ LoadConst32(rhs_reg, rhs_imm);
4009 }
4010 __ Slt(dst, lhs, rhs_reg);
4011 }
4012 return (cond == kCondGE);
4013
4014 case kCondLE:
4015 case kCondGT:
4016 if (use_imm && IsInt<16>(rhs_imm + 1)) {
4017 // Simulate lhs <= rhs via lhs < rhs + 1.
4018 __ Slti(dst, lhs, rhs_imm + 1);
4019 return (cond == kCondGT);
4020 } else {
4021 if (use_imm) {
4022 rhs_reg = TMP;
4023 __ LoadConst32(rhs_reg, rhs_imm);
4024 }
4025 __ Slt(dst, rhs_reg, lhs);
4026 return (cond == kCondLE);
4027 }
4028
4029 case kCondB:
4030 case kCondAE:
4031 if (use_imm && IsInt<16>(rhs_imm)) {
4032 // Sltiu sign-extends its 16-bit immediate operand before
4033 // the comparison and thus lets us compare directly with
4034 // unsigned values in the ranges [0, 0x7fff] and
4035 // [0xffff8000, 0xffffffff].
4036 __ Sltiu(dst, lhs, rhs_imm);
4037 } else {
4038 if (use_imm) {
4039 rhs_reg = TMP;
4040 __ LoadConst32(rhs_reg, rhs_imm);
4041 }
4042 __ Sltu(dst, lhs, rhs_reg);
4043 }
4044 return (cond == kCondAE);
4045
4046 case kCondBE:
4047 case kCondA:
4048 if (use_imm && (rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
4049 // Simulate lhs <= rhs via lhs < rhs + 1.
4050 // Note that this only works if rhs + 1 does not overflow
4051 // to 0, hence the check above.
4052 // Sltiu sign-extends its 16-bit immediate operand before
4053 // the comparison and thus lets us compare directly with
4054 // unsigned values in the ranges [0, 0x7fff] and
4055 // [0xffff8000, 0xffffffff].
4056 __ Sltiu(dst, lhs, rhs_imm + 1);
4057 return (cond == kCondA);
4058 } else {
4059 if (use_imm) {
4060 rhs_reg = TMP;
4061 __ LoadConst32(rhs_reg, rhs_imm);
4062 }
4063 __ Sltu(dst, rhs_reg, lhs);
4064 return (cond == kCondBE);
4065 }
4066 }
4067 }
4068
4069 void InstructionCodeGeneratorMIPS::GenerateIntCompareAndBranch(IfCondition cond,
4070 LocationSummary* locations,
4071 MipsLabel* label) {
4072 Register lhs = locations->InAt(0).AsRegister<Register>();
4073 Location rhs_location = locations->InAt(1);
4074 Register rhs_reg = ZERO;
4075 int64_t rhs_imm = 0;
4076 bool use_imm = rhs_location.IsConstant();
4077 if (use_imm) {
4078 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
4079 } else {
4080 rhs_reg = rhs_location.AsRegister<Register>();
4081 }
4082
4083 if (use_imm && rhs_imm == 0) {
4084 switch (cond) {
4085 case kCondEQ:
4086 case kCondBE: // <= 0 if zero
4087 __ Beqz(lhs, label);
4088 break;
4089 case kCondNE:
4090 case kCondA: // > 0 if non-zero
4091 __ Bnez(lhs, label);
4092 break;
4093 case kCondLT:
4094 __ Bltz(lhs, label);
4095 break;
4096 case kCondGE:
4097 __ Bgez(lhs, label);
4098 break;
4099 case kCondLE:
4100 __ Blez(lhs, label);
4101 break;
4102 case kCondGT:
4103 __ Bgtz(lhs, label);
4104 break;
4105 case kCondB: // always false
4106 break;
4107 case kCondAE: // always true
4108 __ B(label);
4109 break;
4110 }
4111 } else {
4112 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
4113 if (isR6 || !use_imm) {
4114 if (use_imm) {
4115 rhs_reg = TMP;
4116 __ LoadConst32(rhs_reg, rhs_imm);
4117 }
4118 switch (cond) {
4119 case kCondEQ:
4120 __ Beq(lhs, rhs_reg, label);
4121 break;
4122 case kCondNE:
4123 __ Bne(lhs, rhs_reg, label);
4124 break;
4125 case kCondLT:
4126 __ Blt(lhs, rhs_reg, label);
4127 break;
4128 case kCondGE:
4129 __ Bge(lhs, rhs_reg, label);
4130 break;
4131 case kCondLE:
4132 __ Bge(rhs_reg, lhs, label);
4133 break;
4134 case kCondGT:
4135 __ Blt(rhs_reg, lhs, label);
4136 break;
4137 case kCondB:
4138 __ Bltu(lhs, rhs_reg, label);
4139 break;
4140 case kCondAE:
4141 __ Bgeu(lhs, rhs_reg, label);
4142 break;
4143 case kCondBE:
4144 __ Bgeu(rhs_reg, lhs, label);
4145 break;
4146 case kCondA:
4147 __ Bltu(rhs_reg, lhs, label);
4148 break;
4149 }
4150 } else {
4151 // Special cases for more efficient comparison with constants on R2.
4152 switch (cond) {
4153 case kCondEQ:
4154 __ LoadConst32(TMP, rhs_imm);
4155 __ Beq(lhs, TMP, label);
4156 break;
4157 case kCondNE:
4158 __ LoadConst32(TMP, rhs_imm);
4159 __ Bne(lhs, TMP, label);
4160 break;
4161 case kCondLT:
4162 if (IsInt<16>(rhs_imm)) {
4163 __ Slti(TMP, lhs, rhs_imm);
4164 __ Bnez(TMP, label);
4165 } else {
4166 __ LoadConst32(TMP, rhs_imm);
4167 __ Blt(lhs, TMP, label);
4168 }
4169 break;
4170 case kCondGE:
4171 if (IsInt<16>(rhs_imm)) {
4172 __ Slti(TMP, lhs, rhs_imm);
4173 __ Beqz(TMP, label);
4174 } else {
4175 __ LoadConst32(TMP, rhs_imm);
4176 __ Bge(lhs, TMP, label);
4177 }
4178 break;
4179 case kCondLE:
4180 if (IsInt<16>(rhs_imm + 1)) {
4181 // Simulate lhs <= rhs via lhs < rhs + 1.
4182 __ Slti(TMP, lhs, rhs_imm + 1);
4183 __ Bnez(TMP, label);
4184 } else {
4185 __ LoadConst32(TMP, rhs_imm);
4186 __ Bge(TMP, lhs, label);
4187 }
4188 break;
4189 case kCondGT:
4190 if (IsInt<16>(rhs_imm + 1)) {
4191 // Simulate lhs > rhs via !(lhs < rhs + 1).
4192 __ Slti(TMP, lhs, rhs_imm + 1);
4193 __ Beqz(TMP, label);
4194 } else {
4195 __ LoadConst32(TMP, rhs_imm);
4196 __ Blt(TMP, lhs, label);
4197 }
4198 break;
4199 case kCondB:
4200 if (IsInt<16>(rhs_imm)) {
4201 __ Sltiu(TMP, lhs, rhs_imm);
4202 __ Bnez(TMP, label);
4203 } else {
4204 __ LoadConst32(TMP, rhs_imm);
4205 __ Bltu(lhs, TMP, label);
4206 }
4207 break;
4208 case kCondAE:
4209 if (IsInt<16>(rhs_imm)) {
4210 __ Sltiu(TMP, lhs, rhs_imm);
4211 __ Beqz(TMP, label);
4212 } else {
4213 __ LoadConst32(TMP, rhs_imm);
4214 __ Bgeu(lhs, TMP, label);
4215 }
4216 break;
4217 case kCondBE:
4218 if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
4219 // Simulate lhs <= rhs via lhs < rhs + 1.
4220 // Note that this only works if rhs + 1 does not overflow
4221 // to 0, hence the check above.
4222 __ Sltiu(TMP, lhs, rhs_imm + 1);
4223 __ Bnez(TMP, label);
4224 } else {
4225 __ LoadConst32(TMP, rhs_imm);
4226 __ Bgeu(TMP, lhs, label);
4227 }
4228 break;
4229 case kCondA:
4230 if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
4231 // Simulate lhs > rhs via !(lhs < rhs + 1).
4232 // Note that this only works if rhs + 1 does not overflow
4233 // to 0, hence the check above.
4234 __ Sltiu(TMP, lhs, rhs_imm + 1);
4235 __ Beqz(TMP, label);
4236 } else {
4237 __ LoadConst32(TMP, rhs_imm);
4238 __ Bltu(TMP, lhs, label);
4239 }
4240 break;
4241 }
4242 }
4243 }
4244 }
4245
4246 void InstructionCodeGeneratorMIPS::GenerateLongCompareAndBranch(IfCondition cond,
4247 LocationSummary* locations,
4248 MipsLabel* label) {
4249 Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
4250 Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
4251 Location rhs_location = locations->InAt(1);
4252 Register rhs_high = ZERO;
4253 Register rhs_low = ZERO;
4254 int64_t imm = 0;
4255 uint32_t imm_high = 0;
4256 uint32_t imm_low = 0;
4257 bool use_imm = rhs_location.IsConstant();
4258 if (use_imm) {
4259 imm = rhs_location.GetConstant()->AsLongConstant()->GetValue();
4260 imm_high = High32Bits(imm);
4261 imm_low = Low32Bits(imm);
4262 } else {
4263 rhs_high = rhs_location.AsRegisterPairHigh<Register>();
4264 rhs_low = rhs_location.AsRegisterPairLow<Register>();
4265 }
4266
4267 if (use_imm && imm == 0) {
4268 switch (cond) {
4269 case kCondEQ:
4270 case kCondBE: // <= 0 if zero
4271 __ Or(TMP, lhs_high, lhs_low);
4272 __ Beqz(TMP, label);
4273 break;
4274 case kCondNE:
4275 case kCondA: // > 0 if non-zero
4276 __ Or(TMP, lhs_high, lhs_low);
4277 __ Bnez(TMP, label);
4278 break;
4279 case kCondLT:
4280 __ Bltz(lhs_high, label);
4281 break;
4282 case kCondGE:
4283 __ Bgez(lhs_high, label);
4284 break;
4285 case kCondLE:
4286 __ Or(TMP, lhs_high, lhs_low);
4287 __ Sra(AT, lhs_high, 31);
4288 __ Bgeu(AT, TMP, label);
4289 break;
4290 case kCondGT:
4291 __ Or(TMP, lhs_high, lhs_low);
4292 __ Sra(AT, lhs_high, 31);
4293 __ Bltu(AT, TMP, label);
4294 break;
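// For the two cases above, AT is 0xFFFFFFFF when lhs is negative and 0 otherwise,
// while TMP is zero only when lhs == 0. Thus the unsigned AT >= TMP holds exactly
// for lhs <= 0, and AT < TMP exactly for lhs > 0.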
4295 case kCondB: // always false
4296 break;
4297 case kCondAE: // always true
4298 __ B(label);
4299 break;
4300 }
4301 } else if (use_imm) {
4302 // TODO: more efficient comparison with constants without loading them into TMP/AT.
4303 switch (cond) {
4304 case kCondEQ:
4305 __ LoadConst32(TMP, imm_high);
4306 __ Xor(TMP, TMP, lhs_high);
4307 __ LoadConst32(AT, imm_low);
4308 __ Xor(AT, AT, lhs_low);
4309 __ Or(TMP, TMP, AT);
4310 __ Beqz(TMP, label);
4311 break;
4312 case kCondNE:
4313 __ LoadConst32(TMP, imm_high);
4314 __ Xor(TMP, TMP, lhs_high);
4315 __ LoadConst32(AT, imm_low);
4316 __ Xor(AT, AT, lhs_low);
4317 __ Or(TMP, TMP, AT);
4318 __ Bnez(TMP, label);
4319 break;
4320 case kCondLT:
4321 __ LoadConst32(TMP, imm_high);
4322 __ Blt(lhs_high, TMP, label);
4323 __ Slt(TMP, TMP, lhs_high);
4324 __ LoadConst32(AT, imm_low);
4325 __ Sltu(AT, lhs_low, AT);
4326 __ Blt(TMP, AT, label);
4327 break;
4328 case kCondGE:
4329 __ LoadConst32(TMP, imm_high);
4330 __ Blt(TMP, lhs_high, label);
4331 __ Slt(TMP, lhs_high, TMP);
4332 __ LoadConst32(AT, imm_low);
4333 __ Sltu(AT, lhs_low, AT);
4334 __ Or(TMP, TMP, AT);
4335 __ Beqz(TMP, label);
4336 break;
4337 case kCondLE:
4338 __ LoadConst32(TMP, imm_high);
4339 __ Blt(lhs_high, TMP, label);
4340 __ Slt(TMP, TMP, lhs_high);
4341 __ LoadConst32(AT, imm_low);
4342 __ Sltu(AT, AT, lhs_low);
4343 __ Or(TMP, TMP, AT);
4344 __ Beqz(TMP, label);
4345 break;
4346 case kCondGT:
4347 __ LoadConst32(TMP, imm_high);
4348 __ Blt(TMP, lhs_high, label);
4349 __ Slt(TMP, lhs_high, TMP);
4350 __ LoadConst32(AT, imm_low);
4351 __ Sltu(AT, AT, lhs_low);
4352 __ Blt(TMP, AT, label);
4353 break;
4354 case kCondB:
4355 __ LoadConst32(TMP, imm_high);
4356 __ Bltu(lhs_high, TMP, label);
4357 __ Sltu(TMP, TMP, lhs_high);
4358 __ LoadConst32(AT, imm_low);
4359 __ Sltu(AT, lhs_low, AT);
4360 __ Blt(TMP, AT, label);
4361 break;
4362 case kCondAE:
4363 __ LoadConst32(TMP, imm_high);
4364 __ Bltu(TMP, lhs_high, label);
4365 __ Sltu(TMP, lhs_high, TMP);
4366 __ LoadConst32(AT, imm_low);
4367 __ Sltu(AT, lhs_low, AT);
4368 __ Or(TMP, TMP, AT);
4369 __ Beqz(TMP, label);
4370 break;
4371 case kCondBE:
4372 __ LoadConst32(TMP, imm_high);
4373 __ Bltu(lhs_high, TMP, label);
4374 __ Sltu(TMP, TMP, lhs_high);
4375 __ LoadConst32(AT, imm_low);
4376 __ Sltu(AT, AT, lhs_low);
4377 __ Or(TMP, TMP, AT);
4378 __ Beqz(TMP, label);
4379 break;
4380 case kCondA:
4381 __ LoadConst32(TMP, imm_high);
4382 __ Bltu(TMP, lhs_high, label);
4383 __ Sltu(TMP, lhs_high, TMP);
4384 __ LoadConst32(AT, imm_low);
4385 __ Sltu(AT, AT, lhs_low);
4386 __ Blt(TMP, AT, label);
4387 break;
4388 }
4389 } else {
4390 switch (cond) {
4391 case kCondEQ:
4392 __ Xor(TMP, lhs_high, rhs_high);
4393 __ Xor(AT, lhs_low, rhs_low);
4394 __ Or(TMP, TMP, AT);
4395 __ Beqz(TMP, label);
4396 break;
4397 case kCondNE:
4398 __ Xor(TMP, lhs_high, rhs_high);
4399 __ Xor(AT, lhs_low, rhs_low);
4400 __ Or(TMP, TMP, AT);
4401 __ Bnez(TMP, label);
4402 break;
4403 case kCondLT:
4404 __ Blt(lhs_high, rhs_high, label);
4405 __ Slt(TMP, rhs_high, lhs_high);
4406 __ Sltu(AT, lhs_low, rhs_low);
4407 __ Blt(TMP, AT, label);
4408 break;
4409 case kCondGE:
4410 __ Blt(rhs_high, lhs_high, label);
4411 __ Slt(TMP, lhs_high, rhs_high);
4412 __ Sltu(AT, lhs_low, rhs_low);
4413 __ Or(TMP, TMP, AT);
4414 __ Beqz(TMP, label);
4415 break;
4416 case kCondLE:
4417 __ Blt(lhs_high, rhs_high, label);
4418 __ Slt(TMP, rhs_high, lhs_high);
4419 __ Sltu(AT, rhs_low, lhs_low);
4420 __ Or(TMP, TMP, AT);
4421 __ Beqz(TMP, label);
4422 break;
4423 case kCondGT:
4424 __ Blt(rhs_high, lhs_high, label);
4425 __ Slt(TMP, lhs_high, rhs_high);
4426 __ Sltu(AT, rhs_low, lhs_low);
4427 __ Blt(TMP, AT, label);
4428 break;
4429 case kCondB:
4430 __ Bltu(lhs_high, rhs_high, label);
4431 __ Sltu(TMP, rhs_high, lhs_high);
4432 __ Sltu(AT, lhs_low, rhs_low);
4433 __ Blt(TMP, AT, label);
4434 break;
4435 case kCondAE:
4436 __ Bltu(rhs_high, lhs_high, label);
4437 __ Sltu(TMP, lhs_high, rhs_high);
4438 __ Sltu(AT, lhs_low, rhs_low);
4439 __ Or(TMP, TMP, AT);
4440 __ Beqz(TMP, label);
4441 break;
4442 case kCondBE:
4443 __ Bltu(lhs_high, rhs_high, label);
4444 __ Sltu(TMP, rhs_high, lhs_high);
4445 __ Sltu(AT, rhs_low, lhs_low);
4446 __ Or(TMP, TMP, AT);
4447 __ Beqz(TMP, label);
4448 break;
4449 case kCondA:
4450 __ Bltu(rhs_high, lhs_high, label);
4451 __ Sltu(TMP, lhs_high, rhs_high);
4452 __ Sltu(AT, rhs_low, lhs_low);
4453 __ Blt(TMP, AT, label);
4454 break;
4455 }
4456 }
4457 }
4458
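// Materializes the result of a floating-point comparison into a GPR as 0 or 1.
// gt_bias controls how an unordered (NaN) operand resolves: with gt_bias a NaN makes
// lhs compare as greater (so < and <= fail while > and >= succeed); otherwise the
// unordered compare variants are used and a NaN makes lhs compare as less.
// R6 writes an all-ones/all-zeros mask into FTMP with CMP.cond.fmt and reads it back
// with MFC1; pre-R6 sets FP condition code 0 and picks the result with MOVF/MOVT.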
4459 void InstructionCodeGeneratorMIPS::GenerateFpCompare(IfCondition cond,
4460 bool gt_bias,
4461 Primitive::Type type,
4462 LocationSummary* locations) {
4463 Register dst = locations->Out().AsRegister<Register>();
4464 FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
4465 FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
4466 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
4467 if (type == Primitive::kPrimFloat) {
4468 if (isR6) {
4469 switch (cond) {
4470 case kCondEQ:
4471 __ CmpEqS(FTMP, lhs, rhs);
4472 __ Mfc1(dst, FTMP);
4473 __ Andi(dst, dst, 1);
4474 break;
4475 case kCondNE:
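          // CMP.EQ.S yields all ones (-1) when the operands are equal and 0 otherwise,
          // so adding 1 produces 0 for equal inputs and 1 for unequal ones.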
4476 __ CmpEqS(FTMP, lhs, rhs);
4477 __ Mfc1(dst, FTMP);
4478 __ Addiu(dst, dst, 1);
4479 break;
4480 case kCondLT:
4481 if (gt_bias) {
4482 __ CmpLtS(FTMP, lhs, rhs);
4483 } else {
4484 __ CmpUltS(FTMP, lhs, rhs);
4485 }
4486 __ Mfc1(dst, FTMP);
4487 __ Andi(dst, dst, 1);
4488 break;
4489 case kCondLE:
4490 if (gt_bias) {
4491 __ CmpLeS(FTMP, lhs, rhs);
4492 } else {
4493 __ CmpUleS(FTMP, lhs, rhs);
4494 }
4495 __ Mfc1(dst, FTMP);
4496 __ Andi(dst, dst, 1);
4497 break;
4498 case kCondGT:
4499 if (gt_bias) {
4500 __ CmpUltS(FTMP, rhs, lhs);
4501 } else {
4502 __ CmpLtS(FTMP, rhs, lhs);
4503 }
4504 __ Mfc1(dst, FTMP);
4505 __ Andi(dst, dst, 1);
4506 break;
4507 case kCondGE:
4508 if (gt_bias) {
4509 __ CmpUleS(FTMP, rhs, lhs);
4510 } else {
4511 __ CmpLeS(FTMP, rhs, lhs);
4512 }
4513 __ Mfc1(dst, FTMP);
4514 __ Andi(dst, dst, 1);
4515 break;
4516 default:
4517 LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
4518 UNREACHABLE();
4519 }
4520 } else {
4521 switch (cond) {
4522 case kCondEQ:
4523 __ CeqS(0, lhs, rhs);
4524 __ LoadConst32(dst, 1);
4525 __ Movf(dst, ZERO, 0);
4526 break;
4527 case kCondNE:
4528 __ CeqS(0, lhs, rhs);
4529 __ LoadConst32(dst, 1);
4530 __ Movt(dst, ZERO, 0);
4531 break;
4532 case kCondLT:
4533 if (gt_bias) {
4534 __ ColtS(0, lhs, rhs);
4535 } else {
4536 __ CultS(0, lhs, rhs);
4537 }
4538 __ LoadConst32(dst, 1);
4539 __ Movf(dst, ZERO, 0);
4540 break;
4541 case kCondLE:
4542 if (gt_bias) {
4543 __ ColeS(0, lhs, rhs);
4544 } else {
4545 __ CuleS(0, lhs, rhs);
4546 }
4547 __ LoadConst32(dst, 1);
4548 __ Movf(dst, ZERO, 0);
4549 break;
4550 case kCondGT:
4551 if (gt_bias) {
4552 __ CultS(0, rhs, lhs);
4553 } else {
4554 __ ColtS(0, rhs, lhs);
4555 }
4556 __ LoadConst32(dst, 1);
4557 __ Movf(dst, ZERO, 0);
4558 break;
4559 case kCondGE:
4560 if (gt_bias) {
4561 __ CuleS(0, rhs, lhs);
4562 } else {
4563 __ ColeS(0, rhs, lhs);
4564 }
4565 __ LoadConst32(dst, 1);
4566 __ Movf(dst, ZERO, 0);
4567 break;
4568 default:
4569 LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
4570 UNREACHABLE();
4571 }
4572 }
4573 } else {
4574 DCHECK_EQ(type, Primitive::kPrimDouble);
4575 if (isR6) {
4576 switch (cond) {
4577 case kCondEQ:
4578 __ CmpEqD(FTMP, lhs, rhs);
4579 __ Mfc1(dst, FTMP);
4580 __ Andi(dst, dst, 1);
4581 break;
4582 case kCondNE:
4583 __ CmpEqD(FTMP, lhs, rhs);
4584 __ Mfc1(dst, FTMP);
4585 __ Addiu(dst, dst, 1);
4586 break;
4587 case kCondLT:
4588 if (gt_bias) {
4589 __ CmpLtD(FTMP, lhs, rhs);
4590 } else {
4591 __ CmpUltD(FTMP, lhs, rhs);
4592 }
4593 __ Mfc1(dst, FTMP);
4594 __ Andi(dst, dst, 1);
4595 break;
4596 case kCondLE:
4597 if (gt_bias) {
4598 __ CmpLeD(FTMP, lhs, rhs);
4599 } else {
4600 __ CmpUleD(FTMP, lhs, rhs);
4601 }
4602 __ Mfc1(dst, FTMP);
4603 __ Andi(dst, dst, 1);
4604 break;
4605 case kCondGT:
4606 if (gt_bias) {
4607 __ CmpUltD(FTMP, rhs, lhs);
4608 } else {
4609 __ CmpLtD(FTMP, rhs, lhs);
4610 }
4611 __ Mfc1(dst, FTMP);
4612 __ Andi(dst, dst, 1);
4613 break;
4614 case kCondGE:
4615 if (gt_bias) {
4616 __ CmpUleD(FTMP, rhs, lhs);
4617 } else {
4618 __ CmpLeD(FTMP, rhs, lhs);
4619 }
4620 __ Mfc1(dst, FTMP);
4621 __ Andi(dst, dst, 1);
4622 break;
4623 default:
4624 LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
4625 UNREACHABLE();
4626 }
4627 } else {
4628 switch (cond) {
4629 case kCondEQ:
4630 __ CeqD(0, lhs, rhs);
4631 __ LoadConst32(dst, 1);
4632 __ Movf(dst, ZERO, 0);
4633 break;
4634 case kCondNE:
4635 __ CeqD(0, lhs, rhs);
4636 __ LoadConst32(dst, 1);
4637 __ Movt(dst, ZERO, 0);
4638 break;
4639 case kCondLT:
4640 if (gt_bias) {
4641 __ ColtD(0, lhs, rhs);
4642 } else {
4643 __ CultD(0, lhs, rhs);
4644 }
4645 __ LoadConst32(dst, 1);
4646 __ Movf(dst, ZERO, 0);
4647 break;
4648 case kCondLE:
4649 if (gt_bias) {
4650 __ ColeD(0, lhs, rhs);
4651 } else {
4652 __ CuleD(0, lhs, rhs);
4653 }
4654 __ LoadConst32(dst, 1);
4655 __ Movf(dst, ZERO, 0);
4656 break;
4657 case kCondGT:
4658 if (gt_bias) {
4659 __ CultD(0, rhs, lhs);
4660 } else {
4661 __ ColtD(0, rhs, lhs);
4662 }
4663 __ LoadConst32(dst, 1);
4664 __ Movf(dst, ZERO, 0);
4665 break;
4666 case kCondGE:
4667 if (gt_bias) {
4668 __ CuleD(0, rhs, lhs);
4669 } else {
4670 __ ColeD(0, rhs, lhs);
4671 }
4672 __ LoadConst32(dst, 1);
4673 __ Movf(dst, ZERO, 0);
4674 break;
4675 default:
4676 LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
4677 UNREACHABLE();
4678 }
4679 }
4680 }
4681 }
4682
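// Emits a pre-R6 floating-point compare whose result is left in FP condition code
// `cc`. Returns true when the emitted compare is the inverse of the requested
// condition (kCondNE is implemented as an equality test), so the caller must then
// use the inverted conditional move or branch.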
4683 bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR2(IfCondition cond,
4684 bool gt_bias,
4685 Primitive::Type type,
4686 LocationSummary* input_locations,
4687 int cc) {
4688 FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
4689 FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
4690 CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
4691 if (type == Primitive::kPrimFloat) {
4692 switch (cond) {
4693 case kCondEQ:
4694 __ CeqS(cc, lhs, rhs);
4695 return false;
4696 case kCondNE:
4697 __ CeqS(cc, lhs, rhs);
4698 return true;
4699 case kCondLT:
4700 if (gt_bias) {
4701 __ ColtS(cc, lhs, rhs);
4702 } else {
4703 __ CultS(cc, lhs, rhs);
4704 }
4705 return false;
4706 case kCondLE:
4707 if (gt_bias) {
4708 __ ColeS(cc, lhs, rhs);
4709 } else {
4710 __ CuleS(cc, lhs, rhs);
4711 }
4712 return false;
4713 case kCondGT:
4714 if (gt_bias) {
4715 __ CultS(cc, rhs, lhs);
4716 } else {
4717 __ ColtS(cc, rhs, lhs);
4718 }
4719 return false;
4720 case kCondGE:
4721 if (gt_bias) {
4722 __ CuleS(cc, rhs, lhs);
4723 } else {
4724 __ ColeS(cc, rhs, lhs);
4725 }
4726 return false;
4727 default:
4728 LOG(FATAL) << "Unexpected non-floating-point condition";
4729 UNREACHABLE();
4730 }
4731 } else {
4732 DCHECK_EQ(type, Primitive::kPrimDouble);
4733 switch (cond) {
4734 case kCondEQ:
4735 __ CeqD(cc, lhs, rhs);
4736 return false;
4737 case kCondNE:
4738 __ CeqD(cc, lhs, rhs);
4739 return true;
4740 case kCondLT:
4741 if (gt_bias) {
4742 __ ColtD(cc, lhs, rhs);
4743 } else {
4744 __ CultD(cc, lhs, rhs);
4745 }
4746 return false;
4747 case kCondLE:
4748 if (gt_bias) {
4749 __ ColeD(cc, lhs, rhs);
4750 } else {
4751 __ CuleD(cc, lhs, rhs);
4752 }
4753 return false;
4754 case kCondGT:
4755 if (gt_bias) {
4756 __ CultD(cc, rhs, lhs);
4757 } else {
4758 __ ColtD(cc, rhs, lhs);
4759 }
4760 return false;
4761 case kCondGE:
4762 if (gt_bias) {
4763 __ CuleD(cc, rhs, lhs);
4764 } else {
4765 __ ColeD(cc, rhs, lhs);
4766 }
4767 return false;
4768 default:
4769 LOG(FATAL) << "Unexpected non-floating-point condition";
4770 UNREACHABLE();
4771 }
4772 }
4773 }
4774
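// R6 counterpart of MaterializeFpCompareR2: CMP.cond.fmt writes an all-ones or
// all-zeros mask into `dst`. As above, the return value signals an inverted
// condition (kCondNE is emitted as an equality compare).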
4775 bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR6(IfCondition cond,
4776 bool gt_bias,
4777 Primitive::Type type,
4778 LocationSummary* input_locations,
4779 FRegister dst) {
4780 FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
4781 FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
4782 CHECK(codegen_->GetInstructionSetFeatures().IsR6());
4783 if (type == Primitive::kPrimFloat) {
4784 switch (cond) {
4785 case kCondEQ:
4786 __ CmpEqS(dst, lhs, rhs);
4787 return false;
4788 case kCondNE:
4789 __ CmpEqS(dst, lhs, rhs);
4790 return true;
4791 case kCondLT:
4792 if (gt_bias) {
4793 __ CmpLtS(dst, lhs, rhs);
4794 } else {
4795 __ CmpUltS(dst, lhs, rhs);
4796 }
4797 return false;
4798 case kCondLE:
4799 if (gt_bias) {
4800 __ CmpLeS(dst, lhs, rhs);
4801 } else {
4802 __ CmpUleS(dst, lhs, rhs);
4803 }
4804 return false;
4805 case kCondGT:
4806 if (gt_bias) {
4807 __ CmpUltS(dst, rhs, lhs);
4808 } else {
4809 __ CmpLtS(dst, rhs, lhs);
4810 }
4811 return false;
4812 case kCondGE:
4813 if (gt_bias) {
4814 __ CmpUleS(dst, rhs, lhs);
4815 } else {
4816 __ CmpLeS(dst, rhs, lhs);
4817 }
4818 return false;
4819 default:
4820 LOG(FATAL) << "Unexpected non-floating-point condition";
4821 UNREACHABLE();
4822 }
4823 } else {
4824 DCHECK_EQ(type, Primitive::kPrimDouble);
4825 switch (cond) {
4826 case kCondEQ:
4827 __ CmpEqD(dst, lhs, rhs);
4828 return false;
4829 case kCondNE:
4830 __ CmpEqD(dst, lhs, rhs);
4831 return true;
4832 case kCondLT:
4833 if (gt_bias) {
4834 __ CmpLtD(dst, lhs, rhs);
4835 } else {
4836 __ CmpUltD(dst, lhs, rhs);
4837 }
4838 return false;
4839 case kCondLE:
4840 if (gt_bias) {
4841 __ CmpLeD(dst, lhs, rhs);
4842 } else {
4843 __ CmpUleD(dst, lhs, rhs);
4844 }
4845 return false;
4846 case kCondGT:
4847 if (gt_bias) {
4848 __ CmpUltD(dst, rhs, lhs);
4849 } else {
4850 __ CmpLtD(dst, rhs, lhs);
4851 }
4852 return false;
4853 case kCondGE:
4854 if (gt_bias) {
4855 __ CmpUleD(dst, rhs, lhs);
4856 } else {
4857 __ CmpLeD(dst, rhs, lhs);
4858 }
4859 return false;
4860 default:
4861 LOG(FATAL) << "Unexpected non-floating-point condition";
4862 UNREACHABLE();
4863 }
4864 }
4865 }
4866
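// Branching variant of the FP comparisons above: R6 compares into FTMP and branches
// with BC1NEZ/BC1EQZ, while pre-R6 sets FP condition code 0 and branches with
// BC1T/BC1F. gt_bias has the same NaN semantics as in GenerateFpCompare.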
4867 void InstructionCodeGeneratorMIPS::GenerateFpCompareAndBranch(IfCondition cond,
4868 bool gt_bias,
4869 Primitive::Type type,
4870 LocationSummary* locations,
4871 MipsLabel* label) {
4872 FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
4873 FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
4874 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
4875 if (type == Primitive::kPrimFloat) {
4876 if (isR6) {
4877 switch (cond) {
4878 case kCondEQ:
4879 __ CmpEqS(FTMP, lhs, rhs);
4880 __ Bc1nez(FTMP, label);
4881 break;
4882 case kCondNE:
4883 __ CmpEqS(FTMP, lhs, rhs);
4884 __ Bc1eqz(FTMP, label);
4885 break;
4886 case kCondLT:
4887 if (gt_bias) {
4888 __ CmpLtS(FTMP, lhs, rhs);
4889 } else {
4890 __ CmpUltS(FTMP, lhs, rhs);
4891 }
4892 __ Bc1nez(FTMP, label);
4893 break;
4894 case kCondLE:
4895 if (gt_bias) {
4896 __ CmpLeS(FTMP, lhs, rhs);
4897 } else {
4898 __ CmpUleS(FTMP, lhs, rhs);
4899 }
4900 __ Bc1nez(FTMP, label);
4901 break;
4902 case kCondGT:
4903 if (gt_bias) {
4904 __ CmpUltS(FTMP, rhs, lhs);
4905 } else {
4906 __ CmpLtS(FTMP, rhs, lhs);
4907 }
4908 __ Bc1nez(FTMP, label);
4909 break;
4910 case kCondGE:
4911 if (gt_bias) {
4912 __ CmpUleS(FTMP, rhs, lhs);
4913 } else {
4914 __ CmpLeS(FTMP, rhs, lhs);
4915 }
4916 __ Bc1nez(FTMP, label);
4917 break;
4918 default:
4919 LOG(FATAL) << "Unexpected non-floating-point condition";
4920 UNREACHABLE();
4921 }
4922 } else {
4923 switch (cond) {
4924 case kCondEQ:
4925 __ CeqS(0, lhs, rhs);
4926 __ Bc1t(0, label);
4927 break;
4928 case kCondNE:
4929 __ CeqS(0, lhs, rhs);
4930 __ Bc1f(0, label);
4931 break;
4932 case kCondLT:
4933 if (gt_bias) {
4934 __ ColtS(0, lhs, rhs);
4935 } else {
4936 __ CultS(0, lhs, rhs);
4937 }
4938 __ Bc1t(0, label);
4939 break;
4940 case kCondLE:
4941 if (gt_bias) {
4942 __ ColeS(0, lhs, rhs);
4943 } else {
4944 __ CuleS(0, lhs, rhs);
4945 }
4946 __ Bc1t(0, label);
4947 break;
4948 case kCondGT:
4949 if (gt_bias) {
4950 __ CultS(0, rhs, lhs);
4951 } else {
4952 __ ColtS(0, rhs, lhs);
4953 }
4954 __ Bc1t(0, label);
4955 break;
4956 case kCondGE:
4957 if (gt_bias) {
4958 __ CuleS(0, rhs, lhs);
4959 } else {
4960 __ ColeS(0, rhs, lhs);
4961 }
4962 __ Bc1t(0, label);
4963 break;
4964 default:
4965 LOG(FATAL) << "Unexpected non-floating-point condition";
4966 UNREACHABLE();
4967 }
4968 }
4969 } else {
4970 DCHECK_EQ(type, Primitive::kPrimDouble);
4971 if (isR6) {
4972 switch (cond) {
4973 case kCondEQ:
4974 __ CmpEqD(FTMP, lhs, rhs);
4975 __ Bc1nez(FTMP, label);
4976 break;
4977 case kCondNE:
4978 __ CmpEqD(FTMP, lhs, rhs);
4979 __ Bc1eqz(FTMP, label);
4980 break;
4981 case kCondLT:
4982 if (gt_bias) {
4983 __ CmpLtD(FTMP, lhs, rhs);
4984 } else {
4985 __ CmpUltD(FTMP, lhs, rhs);
4986 }
4987 __ Bc1nez(FTMP, label);
4988 break;
4989 case kCondLE:
4990 if (gt_bias) {
4991 __ CmpLeD(FTMP, lhs, rhs);
4992 } else {
4993 __ CmpUleD(FTMP, lhs, rhs);
4994 }
4995 __ Bc1nez(FTMP, label);
4996 break;
4997 case kCondGT:
4998 if (gt_bias) {
4999 __ CmpUltD(FTMP, rhs, lhs);
5000 } else {
5001 __ CmpLtD(FTMP, rhs, lhs);
5002 }
5003 __ Bc1nez(FTMP, label);
5004 break;
5005 case kCondGE:
5006 if (gt_bias) {
5007 __ CmpUleD(FTMP, rhs, lhs);
5008 } else {
5009 __ CmpLeD(FTMP, rhs, lhs);
5010 }
5011 __ Bc1nez(FTMP, label);
5012 break;
5013 default:
5014 LOG(FATAL) << "Unexpected non-floating-point condition";
5015 UNREACHABLE();
5016 }
5017 } else {
5018 switch (cond) {
5019 case kCondEQ:
5020 __ CeqD(0, lhs, rhs);
5021 __ Bc1t(0, label);
5022 break;
5023 case kCondNE:
5024 __ CeqD(0, lhs, rhs);
5025 __ Bc1f(0, label);
5026 break;
5027 case kCondLT:
5028 if (gt_bias) {
5029 __ ColtD(0, lhs, rhs);
5030 } else {
5031 __ CultD(0, lhs, rhs);
5032 }
5033 __ Bc1t(0, label);
5034 break;
5035 case kCondLE:
5036 if (gt_bias) {
5037 __ ColeD(0, lhs, rhs);
5038 } else {
5039 __ CuleD(0, lhs, rhs);
5040 }
5041 __ Bc1t(0, label);
5042 break;
5043 case kCondGT:
5044 if (gt_bias) {
5045 __ CultD(0, rhs, lhs);
5046 } else {
5047 __ ColtD(0, rhs, lhs);
5048 }
5049 __ Bc1t(0, label);
5050 break;
5051 case kCondGE:
5052 if (gt_bias) {
5053 __ CuleD(0, rhs, lhs);
5054 } else {
5055 __ ColeD(0, rhs, lhs);
5056 }
5057 __ Bc1t(0, label);
5058 break;
5059 default:
5060 LOG(FATAL) << "Unexpected non-floating-point condition";
5061 UNREACHABLE();
5062 }
5063 }
5064 }
5065 }
5066
5067 void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instruction,
5068 size_t condition_input_index,
5069 MipsLabel* true_target,
5070 MipsLabel* false_target) {
5071 HInstruction* cond = instruction->InputAt(condition_input_index);
5072
5073 if (true_target == nullptr && false_target == nullptr) {
5074 // Nothing to do. The code always falls through.
5075 return;
5076 } else if (cond->IsIntConstant()) {
5077 // Constant condition, statically compared against "true" (integer value 1).
5078 if (cond->AsIntConstant()->IsTrue()) {
5079 if (true_target != nullptr) {
5080 __ B(true_target);
5081 }
5082 } else {
5083 DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
5084 if (false_target != nullptr) {
5085 __ B(false_target);
5086 }
5087 }
5088 return;
5089 }
5090
5091 // The following code generates these patterns:
5092 // (1) true_target == nullptr && false_target != nullptr
5093 // - opposite condition true => branch to false_target
5094 // (2) true_target != nullptr && false_target == nullptr
5095 // - condition true => branch to true_target
5096 // (3) true_target != nullptr && false_target != nullptr
5097 // - condition true => branch to true_target
5098 // - branch to false_target
5099 if (IsBooleanValueOrMaterializedCondition(cond)) {
5100 // The condition instruction has been materialized, compare the output to 0.
5101 Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
5102 DCHECK(cond_val.IsRegister());
5103 if (true_target == nullptr) {
5104 __ Beqz(cond_val.AsRegister<Register>(), false_target);
5105 } else {
5106 __ Bnez(cond_val.AsRegister<Register>(), true_target);
5107 }
5108 } else {
5109 // The condition instruction has not been materialized, use its inputs as
5110 // the comparison and its condition as the branch condition.
5111 HCondition* condition = cond->AsCondition();
5112 Primitive::Type type = condition->InputAt(0)->GetType();
5113 LocationSummary* locations = cond->GetLocations();
5114 IfCondition if_cond = condition->GetCondition();
5115 MipsLabel* branch_target = true_target;
5116
5117 if (true_target == nullptr) {
5118 if_cond = condition->GetOppositeCondition();
5119 branch_target = false_target;
5120 }
5121
5122 switch (type) {
5123 default:
5124 GenerateIntCompareAndBranch(if_cond, locations, branch_target);
5125 break;
5126 case Primitive::kPrimLong:
5127 GenerateLongCompareAndBranch(if_cond, locations, branch_target);
5128 break;
5129 case Primitive::kPrimFloat:
5130 case Primitive::kPrimDouble:
5131 GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target);
5132 break;
5133 }
5134 }
5135
5136 // If neither branch falls through (case 3), the conditional branch to `true_target`
5137 // was already emitted (case 2) and we need to emit a jump to `false_target`.
5138 if (true_target != nullptr && false_target != nullptr) {
5139 __ B(false_target);
5140 }
5141 }
5142
5143 void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
5144 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
5145 if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
5146 locations->SetInAt(0, Location::RequiresRegister());
5147 }
5148 }
5149
5150 void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
5151 HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
5152 HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
5153 MipsLabel* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
5154 nullptr : codegen_->GetLabelOf(true_successor);
5155 MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
5156 nullptr : codegen_->GetLabelOf(false_successor);
5157 GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
5158 }
5159
5160 void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
5161 LocationSummary* locations = new (GetGraph()->GetArena())
5162 LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
5163 InvokeRuntimeCallingConvention calling_convention;
5164 RegisterSet caller_saves = RegisterSet::Empty();
5165 caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
5166 locations->SetCustomSlowPathCallerSaves(caller_saves);
5167 if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
5168 locations->SetInAt(0, Location::RequiresRegister());
5169 }
5170 }
5171
5172 void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
5173 SlowPathCodeMIPS* slow_path =
5174 deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
5175 GenerateTestAndBranch(deoptimize,
5176 /* condition_input_index */ 0,
5177 slow_path->GetEntryLabel(),
5178 /* false_target */ nullptr);
5179 }
5180
5181 // This function returns true if a conditional move can be generated for HSelect.
5182 // Otherwise it returns false and HSelect must be implemented in terms of conditional
5183 // branches and regular moves.
5184 //
5185 // If `locations_to_set` isn't nullptr, its inputs and outputs are set for HSelect.
5186 //
5187 // While determining feasibility of a conditional move and setting inputs/outputs
5188 // are two distinct tasks, this function does both because they share quite a bit
5189 // of common logic.
5190 static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
5191 bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
5192 HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
5193 HCondition* condition = cond->AsCondition();
5194
5195 Primitive::Type cond_type = materialized ? Primitive::kPrimInt : condition->InputAt(0)->GetType();
5196 Primitive::Type dst_type = select->GetType();
5197
5198 HConstant* cst_true_value = select->GetTrueValue()->AsConstant();
5199 HConstant* cst_false_value = select->GetFalseValue()->AsConstant();
5200 bool is_true_value_zero_constant =
5201 (cst_true_value != nullptr && cst_true_value->IsZeroBitPattern());
5202 bool is_false_value_zero_constant =
5203 (cst_false_value != nullptr && cst_false_value->IsZeroBitPattern());
5204
5205 bool can_move_conditionally = false;
5206 bool use_const_for_false_in = false;
5207 bool use_const_for_true_in = false;
5208
5209 if (!cond->IsConstant()) {
5210 switch (cond_type) {
5211 default:
5212 switch (dst_type) {
5213 default:
5214 // Moving int on int condition.
5215 if (is_r6) {
5216 if (is_true_value_zero_constant) {
5217 // seleqz out_reg, false_reg, cond_reg
5218 can_move_conditionally = true;
5219 use_const_for_true_in = true;
5220 } else if (is_false_value_zero_constant) {
5221 // selnez out_reg, true_reg, cond_reg
5222 can_move_conditionally = true;
5223 use_const_for_false_in = true;
5224 } else if (materialized) {
5225 // Not materializing unmaterialized int conditions
5226 // to keep the instruction count low.
5227 // selnez AT, true_reg, cond_reg
5228 // seleqz TMP, false_reg, cond_reg
5229 // or out_reg, AT, TMP
5230 can_move_conditionally = true;
5231 }
5232 } else {
5233 // movn out_reg, true_reg/ZERO, cond_reg
5234 can_move_conditionally = true;
5235 use_const_for_true_in = is_true_value_zero_constant;
5236 }
5237 break;
5238 case Primitive::kPrimLong:
5239 // Moving long on int condition.
5240 if (is_r6) {
5241 if (is_true_value_zero_constant) {
5242 // seleqz out_reg_lo, false_reg_lo, cond_reg
5243 // seleqz out_reg_hi, false_reg_hi, cond_reg
5244 can_move_conditionally = true;
5245 use_const_for_true_in = true;
5246 } else if (is_false_value_zero_constant) {
5247 // selnez out_reg_lo, true_reg_lo, cond_reg
5248 // selnez out_reg_hi, true_reg_hi, cond_reg
5249 can_move_conditionally = true;
5250 use_const_for_false_in = true;
5251 }
5252 // Other long conditional moves would generate 6+ instructions,
5253 // which is too many.
5254 } else {
5255 // movn out_reg_lo, true_reg_lo/ZERO, cond_reg
5256 // movn out_reg_hi, true_reg_hi/ZERO, cond_reg
5257 can_move_conditionally = true;
5258 use_const_for_true_in = is_true_value_zero_constant;
5259 }
5260 break;
5261 case Primitive::kPrimFloat:
5262 case Primitive::kPrimDouble:
5263 // Moving float/double on int condition.
5264 if (is_r6) {
5265 if (materialized) {
5266 // Not materializing unmaterialized int conditions
5267 // to keep the instruction count low.
5268 can_move_conditionally = true;
5269 if (is_true_value_zero_constant) {
5270 // sltu TMP, ZERO, cond_reg
5271 // mtc1 TMP, temp_cond_reg
5272 // seleqz.fmt out_reg, false_reg, temp_cond_reg
5273 use_const_for_true_in = true;
5274 } else if (is_false_value_zero_constant) {
5275 // sltu TMP, ZERO, cond_reg
5276 // mtc1 TMP, temp_cond_reg
5277 // selnez.fmt out_reg, true_reg, temp_cond_reg
5278 use_const_for_false_in = true;
5279 } else {
5280 // sltu TMP, ZERO, cond_reg
5281 // mtc1 TMP, temp_cond_reg
5282 // sel.fmt temp_cond_reg, false_reg, true_reg
5283 // mov.fmt out_reg, temp_cond_reg
5284 }
5285 }
5286 } else {
5287 // movn.fmt out_reg, true_reg, cond_reg
5288 can_move_conditionally = true;
5289 }
5290 break;
5291 }
5292 break;
5293 case Primitive::kPrimLong:
5294 // We don't materialize long comparison now
5295 // and use conditional branches instead.
5296 break;
5297 case Primitive::kPrimFloat:
5298 case Primitive::kPrimDouble:
5299 switch (dst_type) {
5300 default:
5301 // Moving int on float/double condition.
5302 if (is_r6) {
5303 if (is_true_value_zero_constant) {
5304 // mfc1 TMP, temp_cond_reg
5305 // seleqz out_reg, false_reg, TMP
5306 can_move_conditionally = true;
5307 use_const_for_true_in = true;
5308 } else if (is_false_value_zero_constant) {
5309 // mfc1 TMP, temp_cond_reg
5310 // selnez out_reg, true_reg, TMP
5311 can_move_conditionally = true;
5312 use_const_for_false_in = true;
5313 } else {
5314 // mfc1 TMP, temp_cond_reg
5315 // selnez AT, true_reg, TMP
5316 // seleqz TMP, false_reg, TMP
5317 // or out_reg, AT, TMP
5318 can_move_conditionally = true;
5319 }
5320 } else {
5321 // movt out_reg, true_reg/ZERO, cc
5322 can_move_conditionally = true;
5323 use_const_for_true_in = is_true_value_zero_constant;
5324 }
5325 break;
5326 case Primitive::kPrimLong:
5327 // Moving long on float/double condition.
5328 if (is_r6) {
5329 if (is_true_value_zero_constant) {
5330 // mfc1 TMP, temp_cond_reg
5331 // seleqz out_reg_lo, false_reg_lo, TMP
5332 // seleqz out_reg_hi, false_reg_hi, TMP
5333 can_move_conditionally = true;
5334 use_const_for_true_in = true;
5335 } else if (is_false_value_zero_constant) {
5336 // mfc1 TMP, temp_cond_reg
5337 // selnez out_reg_lo, true_reg_lo, TMP
5338 // selnez out_reg_hi, true_reg_hi, TMP
5339 can_move_conditionally = true;
5340 use_const_for_false_in = true;
5341 }
5342 // Other long conditional moves would generate 6+ instructions,
5343 // which is too many.
5344 } else {
5345 // movt out_reg_lo, true_reg_lo/ZERO, cc
5346 // movt out_reg_hi, true_reg_hi/ZERO, cc
5347 can_move_conditionally = true;
5348 use_const_for_true_in = is_true_value_zero_constant;
5349 }
5350 break;
5351 case Primitive::kPrimFloat:
5352 case Primitive::kPrimDouble:
5353 // Moving float/double on float/double condition.
5354 if (is_r6) {
5355 can_move_conditionally = true;
5356 if (is_true_value_zero_constant) {
5357 // seleqz.fmt out_reg, false_reg, temp_cond_reg
5358 use_const_for_true_in = true;
5359 } else if (is_false_value_zero_constant) {
5360 // selnez.fmt out_reg, true_reg, temp_cond_reg
5361 use_const_for_false_in = true;
5362 } else {
5363 // sel.fmt temp_cond_reg, false_reg, true_reg
5364 // mov.fmt out_reg, temp_cond_reg
5365 }
5366 } else {
5367 // movt.fmt out_reg, true_reg, cc
5368 can_move_conditionally = true;
5369 }
5370 break;
5371 }
5372 break;
5373 }
5374 }
5375
5376 if (can_move_conditionally) {
5377 DCHECK(!use_const_for_false_in || !use_const_for_true_in);
5378 } else {
5379 DCHECK(!use_const_for_false_in);
5380 DCHECK(!use_const_for_true_in);
5381 }
5382
5383 if (locations_to_set != nullptr) {
5384 if (use_const_for_false_in) {
5385 locations_to_set->SetInAt(0, Location::ConstantLocation(cst_false_value));
5386 } else {
5387 locations_to_set->SetInAt(0,
5388 Primitive::IsFloatingPointType(dst_type)
5389 ? Location::RequiresFpuRegister()
5390 : Location::RequiresRegister());
5391 }
5392 if (use_const_for_true_in) {
5393 locations_to_set->SetInAt(1, Location::ConstantLocation(cst_true_value));
5394 } else {
5395 locations_to_set->SetInAt(1,
5396 Primitive::IsFloatingPointType(dst_type)
5397 ? Location::RequiresFpuRegister()
5398 : Location::RequiresRegister());
5399 }
5400 if (materialized) {
5401 locations_to_set->SetInAt(2, Location::RequiresRegister());
5402 }
5403 // On R6 we don't require the output to be the same as the
5404     // first input for conditional moves, unlike on R2.
5405 bool is_out_same_as_first_in = !can_move_conditionally || !is_r6;
5406 if (is_out_same_as_first_in) {
5407 locations_to_set->SetOut(Location::SameAsFirstInput());
5408 } else {
5409 locations_to_set->SetOut(Primitive::IsFloatingPointType(dst_type)
5410 ? Location::RequiresFpuRegister()
5411 : Location::RequiresRegister());
5412 }
5413 }
5414
5415 return can_move_conditionally;
5416 }
5417
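// Pre-R6 conditional move for HSelect. The output register starts out holding the
// false value (it shares its location with the first input) and is conditionally
// overwritten with the true value: MOVN/MOVZ on an integer condition register, or
// MOVT/MOVF on FP condition code 0 when the condition is a floating-point compare.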
5418 void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) {
5419 LocationSummary* locations = select->GetLocations();
5420 Location dst = locations->Out();
5421 Location src = locations->InAt(1);
5422 Register src_reg = ZERO;
5423 Register src_reg_high = ZERO;
5424 HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
5425 Register cond_reg = TMP;
5426 int cond_cc = 0;
5427 Primitive::Type cond_type = Primitive::kPrimInt;
5428 bool cond_inverted = false;
5429 Primitive::Type dst_type = select->GetType();
5430
5431 if (IsBooleanValueOrMaterializedCondition(cond)) {
5432 cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
5433 } else {
5434 HCondition* condition = cond->AsCondition();
5435 LocationSummary* cond_locations = cond->GetLocations();
5436 IfCondition if_cond = condition->GetCondition();
5437 cond_type = condition->InputAt(0)->GetType();
5438 switch (cond_type) {
5439 default:
5440 DCHECK_NE(cond_type, Primitive::kPrimLong);
5441 cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
5442 break;
5443 case Primitive::kPrimFloat:
5444 case Primitive::kPrimDouble:
5445 cond_inverted = MaterializeFpCompareR2(if_cond,
5446 condition->IsGtBias(),
5447 cond_type,
5448 cond_locations,
5449 cond_cc);
5450 break;
5451 }
5452 }
5453
5454 DCHECK(dst.Equals(locations->InAt(0)));
5455 if (src.IsRegister()) {
5456 src_reg = src.AsRegister<Register>();
5457 } else if (src.IsRegisterPair()) {
5458 src_reg = src.AsRegisterPairLow<Register>();
5459 src_reg_high = src.AsRegisterPairHigh<Register>();
5460 } else if (src.IsConstant()) {
5461 DCHECK(src.GetConstant()->IsZeroBitPattern());
5462 }
5463
5464 switch (cond_type) {
5465 default:
5466 switch (dst_type) {
5467 default:
5468 if (cond_inverted) {
5469 __ Movz(dst.AsRegister<Register>(), src_reg, cond_reg);
5470 } else {
5471 __ Movn(dst.AsRegister<Register>(), src_reg, cond_reg);
5472 }
5473 break;
5474 case Primitive::kPrimLong:
5475 if (cond_inverted) {
5476 __ Movz(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
5477 __ Movz(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
5478 } else {
5479 __ Movn(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
5480 __ Movn(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
5481 }
5482 break;
5483 case Primitive::kPrimFloat:
5484 if (cond_inverted) {
5485 __ MovzS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
5486 } else {
5487 __ MovnS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
5488 }
5489 break;
5490 case Primitive::kPrimDouble:
5491 if (cond_inverted) {
5492 __ MovzD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
5493 } else {
5494 __ MovnD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
5495 }
5496 break;
5497 }
5498 break;
5499 case Primitive::kPrimLong:
5500 LOG(FATAL) << "Unreachable";
5501 UNREACHABLE();
5502 case Primitive::kPrimFloat:
5503 case Primitive::kPrimDouble:
5504 switch (dst_type) {
5505 default:
5506 if (cond_inverted) {
5507 __ Movf(dst.AsRegister<Register>(), src_reg, cond_cc);
5508 } else {
5509 __ Movt(dst.AsRegister<Register>(), src_reg, cond_cc);
5510 }
5511 break;
5512 case Primitive::kPrimLong:
5513 if (cond_inverted) {
5514 __ Movf(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
5515 __ Movf(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
5516 } else {
5517 __ Movt(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
5518 __ Movt(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
5519 }
5520 break;
5521 case Primitive::kPrimFloat:
5522 if (cond_inverted) {
5523 __ MovfS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
5524 } else {
5525 __ MovtS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
5526 }
5527 break;
5528 case Primitive::kPrimDouble:
5529 if (cond_inverted) {
5530 __ MovfD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
5531 } else {
5532 __ MovtD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
5533 }
5534 break;
5535 }
5536 break;
5537 }
5538 }
5539
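// R6 conditional move for HSelect. Integer destinations use SELEQZ/SELNEZ: when one
// input is the zero constant a single select suffices, otherwise both inputs are
// masked into AT and TMP and combined with OR. FP destinations use SELEQZ.fmt,
// SELNEZ.fmt or SEL.fmt. The condition is first moved between a GPR and an FPR when
// its type does not match the selecting instruction.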
5540 void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
5541 LocationSummary* locations = select->GetLocations();
5542 Location dst = locations->Out();
5543 Location false_src = locations->InAt(0);
5544 Location true_src = locations->InAt(1);
5545 HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
5546 Register cond_reg = TMP;
5547 FRegister fcond_reg = FTMP;
5548 Primitive::Type cond_type = Primitive::kPrimInt;
5549 bool cond_inverted = false;
5550 Primitive::Type dst_type = select->GetType();
5551
5552 if (IsBooleanValueOrMaterializedCondition(cond)) {
5553 cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
5554 } else {
5555 HCondition* condition = cond->AsCondition();
5556 LocationSummary* cond_locations = cond->GetLocations();
5557 IfCondition if_cond = condition->GetCondition();
5558 cond_type = condition->InputAt(0)->GetType();
5559 switch (cond_type) {
5560 default:
5561 DCHECK_NE(cond_type, Primitive::kPrimLong);
5562 cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
5563 break;
5564 case Primitive::kPrimFloat:
5565 case Primitive::kPrimDouble:
5566 cond_inverted = MaterializeFpCompareR6(if_cond,
5567 condition->IsGtBias(),
5568 cond_type,
5569 cond_locations,
5570 fcond_reg);
5571 break;
5572 }
5573 }
5574
5575 if (true_src.IsConstant()) {
5576 DCHECK(true_src.GetConstant()->IsZeroBitPattern());
5577 }
5578 if (false_src.IsConstant()) {
5579 DCHECK(false_src.GetConstant()->IsZeroBitPattern());
5580 }
5581
5582 switch (dst_type) {
5583 default:
5584 if (Primitive::IsFloatingPointType(cond_type)) {
5585 __ Mfc1(cond_reg, fcond_reg);
5586 }
5587 if (true_src.IsConstant()) {
5588 if (cond_inverted) {
5589 __ Selnez(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
5590 } else {
5591 __ Seleqz(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
5592 }
5593 } else if (false_src.IsConstant()) {
5594 if (cond_inverted) {
5595 __ Seleqz(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
5596 } else {
5597 __ Selnez(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
5598 }
5599 } else {
5600 DCHECK_NE(cond_reg, AT);
5601 if (cond_inverted) {
5602 __ Seleqz(AT, true_src.AsRegister<Register>(), cond_reg);
5603 __ Selnez(TMP, false_src.AsRegister<Register>(), cond_reg);
5604 } else {
5605 __ Selnez(AT, true_src.AsRegister<Register>(), cond_reg);
5606 __ Seleqz(TMP, false_src.AsRegister<Register>(), cond_reg);
5607 }
5608 __ Or(dst.AsRegister<Register>(), AT, TMP);
5609 }
5610 break;
5611 case Primitive::kPrimLong: {
5612 if (Primitive::IsFloatingPointType(cond_type)) {
5613 __ Mfc1(cond_reg, fcond_reg);
5614 }
5615 Register dst_lo = dst.AsRegisterPairLow<Register>();
5616 Register dst_hi = dst.AsRegisterPairHigh<Register>();
5617 if (true_src.IsConstant()) {
5618 Register src_lo = false_src.AsRegisterPairLow<Register>();
5619 Register src_hi = false_src.AsRegisterPairHigh<Register>();
5620 if (cond_inverted) {
5621 __ Selnez(dst_lo, src_lo, cond_reg);
5622 __ Selnez(dst_hi, src_hi, cond_reg);
5623 } else {
5624 __ Seleqz(dst_lo, src_lo, cond_reg);
5625 __ Seleqz(dst_hi, src_hi, cond_reg);
5626 }
5627 } else {
5628 DCHECK(false_src.IsConstant());
5629 Register src_lo = true_src.AsRegisterPairLow<Register>();
5630 Register src_hi = true_src.AsRegisterPairHigh<Register>();
5631 if (cond_inverted) {
5632 __ Seleqz(dst_lo, src_lo, cond_reg);
5633 __ Seleqz(dst_hi, src_hi, cond_reg);
5634 } else {
5635 __ Selnez(dst_lo, src_lo, cond_reg);
5636 __ Selnez(dst_hi, src_hi, cond_reg);
5637 }
5638 }
5639 break;
5640 }
5641 case Primitive::kPrimFloat: {
5642 if (!Primitive::IsFloatingPointType(cond_type)) {
5643 // sel*.fmt tests bit 0 of the condition register, account for that.
5644 __ Sltu(TMP, ZERO, cond_reg);
5645 __ Mtc1(TMP, fcond_reg);
5646 }
5647 FRegister dst_reg = dst.AsFpuRegister<FRegister>();
5648 if (true_src.IsConstant()) {
5649 FRegister src_reg = false_src.AsFpuRegister<FRegister>();
5650 if (cond_inverted) {
5651 __ SelnezS(dst_reg, src_reg, fcond_reg);
5652 } else {
5653 __ SeleqzS(dst_reg, src_reg, fcond_reg);
5654 }
5655 } else if (false_src.IsConstant()) {
5656 FRegister src_reg = true_src.AsFpuRegister<FRegister>();
5657 if (cond_inverted) {
5658 __ SeleqzS(dst_reg, src_reg, fcond_reg);
5659 } else {
5660 __ SelnezS(dst_reg, src_reg, fcond_reg);
5661 }
5662 } else {
5663 if (cond_inverted) {
5664 __ SelS(fcond_reg,
5665 true_src.AsFpuRegister<FRegister>(),
5666 false_src.AsFpuRegister<FRegister>());
5667 } else {
5668 __ SelS(fcond_reg,
5669 false_src.AsFpuRegister<FRegister>(),
5670 true_src.AsFpuRegister<FRegister>());
5671 }
5672 __ MovS(dst_reg, fcond_reg);
5673 }
5674 break;
5675 }
5676 case Primitive::kPrimDouble: {
5677 if (!Primitive::IsFloatingPointType(cond_type)) {
5678 // sel*.fmt tests bit 0 of the condition register, account for that.
5679 __ Sltu(TMP, ZERO, cond_reg);
5680 __ Mtc1(TMP, fcond_reg);
5681 }
5682 FRegister dst_reg = dst.AsFpuRegister<FRegister>();
5683 if (true_src.IsConstant()) {
5684 FRegister src_reg = false_src.AsFpuRegister<FRegister>();
5685 if (cond_inverted) {
5686 __ SelnezD(dst_reg, src_reg, fcond_reg);
5687 } else {
5688 __ SeleqzD(dst_reg, src_reg, fcond_reg);
5689 }
5690 } else if (false_src.IsConstant()) {
5691 FRegister src_reg = true_src.AsFpuRegister<FRegister>();
5692 if (cond_inverted) {
5693 __ SeleqzD(dst_reg, src_reg, fcond_reg);
5694 } else {
5695 __ SelnezD(dst_reg, src_reg, fcond_reg);
5696 }
5697 } else {
5698 if (cond_inverted) {
5699 __ SelD(fcond_reg,
5700 true_src.AsFpuRegister<FRegister>(),
5701 false_src.AsFpuRegister<FRegister>());
5702 } else {
5703 __ SelD(fcond_reg,
5704 false_src.AsFpuRegister<FRegister>(),
5705 true_src.AsFpuRegister<FRegister>());
5706 }
5707 __ MovD(dst_reg, fcond_reg);
5708 }
5709 break;
5710 }
5711 }
5712 }
5713
5714 void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
5715 LocationSummary* locations = new (GetGraph()->GetArena())
5716 LocationSummary(flag, LocationSummary::kNoCall);
5717 locations->SetOut(Location::RequiresRegister());
5718 }
5719
5720 void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
5721 __ LoadFromOffset(kLoadWord,
5722 flag->GetLocations()->Out().AsRegister<Register>(),
5723 SP,
5724 codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
5725 }
5726
5727 void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
5728 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
5729 CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
5730 }
5731
5732 void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
5733 bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
5734 if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) {
5735 if (is_r6) {
5736 GenConditionalMoveR6(select);
5737 } else {
5738 GenConditionalMoveR2(select);
5739 }
5740 } else {
5741 LocationSummary* locations = select->GetLocations();
5742 MipsLabel false_target;
5743 GenerateTestAndBranch(select,
5744 /* condition_input_index */ 2,
5745 /* true_target */ nullptr,
5746 &false_target);
5747 codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
5748 __ Bind(&false_target);
5749 }
5750 }
5751
5752 void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
5753 new (GetGraph()->GetArena()) LocationSummary(info);
5754 }
5755
5756 void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo*) {
5757 // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
5758 }
5759
5760 void CodeGeneratorMIPS::GenerateNop() {
5761 __ Nop();
5762 }
5763
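// Field loads. Volatile 64-bit (long/double) fields cannot be read atomically with
// plain 32-bit loads, so they are routed through the kQuickA64Load runtime entrypoint
// (hence kCallOnMainOnly and the A0 temp below); all other fields use regular loads,
// with read-barrier support for reference fields.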
5764 void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
5765 Primitive::Type field_type = field_info.GetFieldType();
5766 bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
5767 bool generate_volatile = field_info.IsVolatile() && is_wide;
5768 bool object_field_get_with_read_barrier =
5769 kEmitCompilerReadBarrier && (field_type == Primitive::kPrimNot);
5770 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
5771 instruction,
5772 generate_volatile
5773 ? LocationSummary::kCallOnMainOnly
5774 : (object_field_get_with_read_barrier
5775 ? LocationSummary::kCallOnSlowPath
5776 : LocationSummary::kNoCall));
5777
5778 locations->SetInAt(0, Location::RequiresRegister());
5779 if (generate_volatile) {
5780 InvokeRuntimeCallingConvention calling_convention;
5781     // Need A0 to hold base + offset.
5782 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
5783 if (field_type == Primitive::kPrimLong) {
5784 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimLong));
5785 } else {
5786       // Use Location::Any() to avoid running out of available FP registers.
5787 locations->SetOut(Location::Any());
5788 // Need some temp core regs since FP results are returned in core registers
5789 Location reg = calling_convention.GetReturnLocation(Primitive::kPrimLong);
5790 locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>()));
5791 locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairHigh<Register>()));
5792 }
5793 } else {
5794 if (Primitive::IsFloatingPointType(instruction->GetType())) {
5795 locations->SetOut(Location::RequiresFpuRegister());
5796 } else {
5797 // The output overlaps in the case of an object field get with
5798 // read barriers enabled: we do not want the move to overwrite the
5799 // object's location, as we need it to emit the read barrier.
5800 locations->SetOut(Location::RequiresRegister(),
5801 object_field_get_with_read_barrier
5802 ? Location::kOutputOverlap
5803 : Location::kNoOutputOverlap);
5804 }
5805 if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
5806 // We need a temporary register for the read barrier marking slow
5807 // path in CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier.
5808 locations->AddTemp(Location::RequiresRegister());
5809 }
5810 }
5811 }
5812
5813 void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
5814 const FieldInfo& field_info,
5815 uint32_t dex_pc) {
5816 Primitive::Type type = field_info.GetFieldType();
5817 LocationSummary* locations = instruction->GetLocations();
5818 Location obj_loc = locations->InAt(0);
5819 Register obj = obj_loc.AsRegister<Register>();
5820 Location dst_loc = locations->Out();
5821 LoadOperandType load_type = kLoadUnsignedByte;
5822 bool is_volatile = field_info.IsVolatile();
5823 uint32_t offset = field_info.GetFieldOffset().Uint32Value();
5824 auto null_checker = GetImplicitNullChecker(instruction, codegen_);
5825
5826 switch (type) {
5827 case Primitive::kPrimBoolean:
5828 load_type = kLoadUnsignedByte;
5829 break;
5830 case Primitive::kPrimByte:
5831 load_type = kLoadSignedByte;
5832 break;
5833 case Primitive::kPrimShort:
5834 load_type = kLoadSignedHalfword;
5835 break;
5836 case Primitive::kPrimChar:
5837 load_type = kLoadUnsignedHalfword;
5838 break;
5839 case Primitive::kPrimInt:
5840 case Primitive::kPrimFloat:
5841 case Primitive::kPrimNot:
5842 load_type = kLoadWord;
5843 break;
5844 case Primitive::kPrimLong:
5845 case Primitive::kPrimDouble:
5846 load_type = kLoadDoubleword;
5847 break;
5848 case Primitive::kPrimVoid:
5849 LOG(FATAL) << "Unreachable type " << type;
5850 UNREACHABLE();
5851 }
5852
5853 if (is_volatile && load_type == kLoadDoubleword) {
5854 InvokeRuntimeCallingConvention calling_convention;
5855 __ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
5856     // Do an implicit null check.
5857 __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
5858 codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
5859 codegen_->InvokeRuntime(kQuickA64Load, instruction, dex_pc);
5860 CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
5861 if (type == Primitive::kPrimDouble) {
5862 // FP results are returned in core registers. Need to move them.
5863 if (dst_loc.IsFpuRegister()) {
5864 __ Mtc1(locations->GetTemp(1).AsRegister<Register>(), dst_loc.AsFpuRegister<FRegister>());
5865 __ MoveToFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
5866 dst_loc.AsFpuRegister<FRegister>());
5867 } else {
5868 DCHECK(dst_loc.IsDoubleStackSlot());
5869 __ StoreToOffset(kStoreWord,
5870 locations->GetTemp(1).AsRegister<Register>(),
5871 SP,
5872 dst_loc.GetStackIndex());
5873 __ StoreToOffset(kStoreWord,
5874 locations->GetTemp(2).AsRegister<Register>(),
5875 SP,
5876 dst_loc.GetStackIndex() + 4);
5877 }
5878 }
5879 } else {
5880 if (type == Primitive::kPrimNot) {
5881 // /* HeapReference<Object> */ dst = *(obj + offset)
5882 if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
5883 Location temp_loc = locations->GetTemp(0);
5884 // Note that a potential implicit null check is handled in this
5885 // CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier call.
5886 codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
5887 dst_loc,
5888 obj,
5889 offset,
5890 temp_loc,
5891 /* needs_null_check */ true);
5892 if (is_volatile) {
5893 GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
5894 }
5895 } else {
5896 __ LoadFromOffset(kLoadWord, dst_loc.AsRegister<Register>(), obj, offset, null_checker);
5897 if (is_volatile) {
5898 GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
5899 }
5900 // If read barriers are enabled, emit read barriers other than
5901 // Baker's using a slow path (and also unpoison the loaded
5902 // reference, if heap poisoning is enabled).
5903 codegen_->MaybeGenerateReadBarrierSlow(instruction, dst_loc, dst_loc, obj_loc, offset);
5904 }
5905 } else if (!Primitive::IsFloatingPointType(type)) {
5906 Register dst;
5907 if (type == Primitive::kPrimLong) {
5908 DCHECK(dst_loc.IsRegisterPair());
5909 dst = dst_loc.AsRegisterPairLow<Register>();
5910 } else {
5911 DCHECK(dst_loc.IsRegister());
5912 dst = dst_loc.AsRegister<Register>();
5913 }
5914 __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
5915 } else {
5916 DCHECK(dst_loc.IsFpuRegister());
5917 FRegister dst = dst_loc.AsFpuRegister<FRegister>();
5918 if (type == Primitive::kPrimFloat) {
5919 __ LoadSFromOffset(dst, obj, offset, null_checker);
5920 } else {
5921 __ LoadDFromOffset(dst, obj, offset, null_checker);
5922 }
5923 }
5924 }
5925
5926 // Memory barriers, in the case of references, are handled in the
5927 // previous switch statement.
5928 if (is_volatile && (type != Primitive::kPrimNot)) {
5929 GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
5930 }
5931 }
5932
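// Field stores mirror the loads above: volatile 64-bit values go through the
// kQuickA64Store entrypoint with the value passed in core registers; other stores are
// plain, with heap-reference poisoning and a GC card mark when a (possibly non-null)
// reference is stored.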
5933 void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
5934 Primitive::Type field_type = field_info.GetFieldType();
5935 bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
5936 bool generate_volatile = field_info.IsVolatile() && is_wide;
5937 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
5938 instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
5939
5940 locations->SetInAt(0, Location::RequiresRegister());
5941 if (generate_volatile) {
5942 InvokeRuntimeCallingConvention calling_convention;
5943     // Need A0 to hold base + offset.
5944 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
5945 if (field_type == Primitive::kPrimLong) {
5946 locations->SetInAt(1, Location::RegisterPairLocation(
5947 calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
5948 } else {
5949       // Use Location::Any() to avoid running out of available FP registers.
5950 locations->SetInAt(1, Location::Any());
5951 // Pass FP parameters in core registers.
5952 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
5953 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
5954 }
5955 } else {
5956 if (Primitive::IsFloatingPointType(field_type)) {
5957 locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
5958 } else {
5959 locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1)));
5960 }
5961 }
5962 }
5963
5964 void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
5965 const FieldInfo& field_info,
5966 uint32_t dex_pc,
5967 bool value_can_be_null) {
5968 Primitive::Type type = field_info.GetFieldType();
5969 LocationSummary* locations = instruction->GetLocations();
5970 Register obj = locations->InAt(0).AsRegister<Register>();
5971 Location value_location = locations->InAt(1);
5972 StoreOperandType store_type = kStoreByte;
5973 bool is_volatile = field_info.IsVolatile();
5974 uint32_t offset = field_info.GetFieldOffset().Uint32Value();
5975 bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
5976 auto null_checker = GetImplicitNullChecker(instruction, codegen_);
5977
5978 switch (type) {
5979 case Primitive::kPrimBoolean:
5980 case Primitive::kPrimByte:
5981 store_type = kStoreByte;
5982 break;
5983 case Primitive::kPrimShort:
5984 case Primitive::kPrimChar:
5985 store_type = kStoreHalfword;
5986 break;
5987 case Primitive::kPrimInt:
5988 case Primitive::kPrimFloat:
5989 case Primitive::kPrimNot:
5990 store_type = kStoreWord;
5991 break;
5992 case Primitive::kPrimLong:
5993 case Primitive::kPrimDouble:
5994 store_type = kStoreDoubleword;
5995 break;
5996 case Primitive::kPrimVoid:
5997 LOG(FATAL) << "Unreachable type " << type;
5998 UNREACHABLE();
5999 }
6000
6001 if (is_volatile) {
6002 GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
6003 }
6004
6005 if (is_volatile && store_type == kStoreDoubleword) {
6006 InvokeRuntimeCallingConvention calling_convention;
6007 __ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
6008     // Do an implicit null check.
6009 __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
6010 codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
6011 if (type == Primitive::kPrimDouble) {
6012 // Pass FP parameters in core registers.
6013 if (value_location.IsFpuRegister()) {
6014 __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
6015 value_location.AsFpuRegister<FRegister>());
6016 __ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
6017 value_location.AsFpuRegister<FRegister>());
6018 } else if (value_location.IsDoubleStackSlot()) {
6019 __ LoadFromOffset(kLoadWord,
6020 locations->GetTemp(1).AsRegister<Register>(),
6021 SP,
6022 value_location.GetStackIndex());
6023 __ LoadFromOffset(kLoadWord,
6024 locations->GetTemp(2).AsRegister<Register>(),
6025 SP,
6026 value_location.GetStackIndex() + 4);
6027 } else {
6028 DCHECK(value_location.IsConstant());
6029 DCHECK(value_location.GetConstant()->IsDoubleConstant());
6030 int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
6031 __ LoadConst64(locations->GetTemp(2).AsRegister<Register>(),
6032 locations->GetTemp(1).AsRegister<Register>(),
6033 value);
6034 }
6035 }
6036 codegen_->InvokeRuntime(kQuickA64Store, instruction, dex_pc);
6037 CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>();
6038 } else {
6039 if (value_location.IsConstant()) {
6040 int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
6041 __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker);
6042 } else if (!Primitive::IsFloatingPointType(type)) {
6043 Register src;
6044 if (type == Primitive::kPrimLong) {
6045 src = value_location.AsRegisterPairLow<Register>();
6046 } else {
6047 src = value_location.AsRegister<Register>();
6048 }
6049 if (kPoisonHeapReferences && needs_write_barrier) {
6050 // Note that in the case where `value` is a null reference,
6051 // we do not enter this block, as a null reference does not
6052 // need poisoning.
6053 DCHECK_EQ(type, Primitive::kPrimNot);
6054 __ PoisonHeapReference(TMP, src);
6055 __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
6056 } else {
6057 __ StoreToOffset(store_type, src, obj, offset, null_checker);
6058 }
6059 } else {
6060 FRegister src = value_location.AsFpuRegister<FRegister>();
6061 if (type == Primitive::kPrimFloat) {
6062 __ StoreSToOffset(src, obj, offset, null_checker);
6063 } else {
6064 __ StoreDToOffset(src, obj, offset, null_checker);
6065 }
6066 }
6067 }
6068
6069 if (needs_write_barrier) {
6070 Register src = value_location.AsRegister<Register>();
6071 codegen_->MarkGCCard(obj, src, value_can_be_null);
6072 }
6073
6074 if (is_volatile) {
6075 GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
6076 }
6077 }
6078
6079 void LocationsBuilderMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
6080 HandleFieldGet(instruction, instruction->GetFieldInfo());
6081 }
6082
6083 void InstructionCodeGeneratorMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
6084 HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
6085 }
6086
6087 void LocationsBuilderMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
6088 HandleFieldSet(instruction, instruction->GetFieldInfo());
6089 }
6090
6091 void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
6092 HandleFieldSet(instruction,
6093 instruction->GetFieldInfo(),
6094 instruction->GetDexPc(),
6095 instruction->GetValueCanBeNull());
6096 }
6097
6098 void InstructionCodeGeneratorMIPS::GenerateReferenceLoadOneRegister(
6099 HInstruction* instruction,
6100 Location out,
6101 uint32_t offset,
6102 Location maybe_temp,
6103 ReadBarrierOption read_barrier_option) {
6104 Register out_reg = out.AsRegister<Register>();
6105 if (read_barrier_option == kWithReadBarrier) {
6106 CHECK(kEmitCompilerReadBarrier);
6107 DCHECK(maybe_temp.IsRegister()) << maybe_temp;
6108 if (kUseBakerReadBarrier) {
6109 // Load with fast path based Baker's read barrier.
6110 // /* HeapReference<Object> */ out = *(out + offset)
6111 codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
6112 out,
6113 out_reg,
6114 offset,
6115 maybe_temp,
6116 /* needs_null_check */ false);
6117 } else {
6118 // Load with slow path based read barrier.
6119 // Save the value of `out` into `maybe_temp` before overwriting it
6120 // in the following move operation, as we will need it for the
6121 // read barrier below.
6122 __ Move(maybe_temp.AsRegister<Register>(), out_reg);
6123 // /* HeapReference<Object> */ out = *(out + offset)
6124 __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
6125 codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset);
6126 }
6127 } else {
6128 // Plain load with no read barrier.
6129 // /* HeapReference<Object> */ out = *(out + offset)
6130 __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
6131 __ MaybeUnpoisonHeapReference(out_reg);
6132 }
6133 }
6134
6135 void InstructionCodeGeneratorMIPS::GenerateReferenceLoadTwoRegisters(
6136 HInstruction* instruction,
6137 Location out,
6138 Location obj,
6139 uint32_t offset,
6140 Location maybe_temp,
6141 ReadBarrierOption read_barrier_option) {
6142 Register out_reg = out.AsRegister<Register>();
6143 Register obj_reg = obj.AsRegister<Register>();
6144 if (read_barrier_option == kWithReadBarrier) {
6145 CHECK(kEmitCompilerReadBarrier);
6146 if (kUseBakerReadBarrier) {
6147 DCHECK(maybe_temp.IsRegister()) << maybe_temp;
6148 // Load with fast path based Baker's read barrier.
6149 // /* HeapReference<Object> */ out = *(obj + offset)
6150 codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
6151 out,
6152 obj_reg,
6153 offset,
6154 maybe_temp,
6155 /* needs_null_check */ false);
6156 } else {
6157 // Load with slow path based read barrier.
6158 // /* HeapReference<Object> */ out = *(obj + offset)
6159 __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
6160 codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
6161 }
6162 } else {
6163 // Plain load with no read barrier.
6164 // /* HeapReference<Object> */ out = *(obj + offset)
6165 __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
6166 __ MaybeUnpoisonHeapReference(out_reg);
6167 }
6168 }
6169
6170 void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruction,
6171 Location root,
6172 Register obj,
6173 uint32_t offset,
6174 ReadBarrierOption read_barrier_option) {
6175 Register root_reg = root.AsRegister<Register>();
6176 if (read_barrier_option == kWithReadBarrier) {
6177 DCHECK(kEmitCompilerReadBarrier);
6178 if (kUseBakerReadBarrier) {
6179 // Fast path implementation of art::ReadBarrier::BarrierForRoot when
6180 // Baker's read barriers are used:
6181 //
6182 // root = obj.field;
6183 // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
6184 // if (temp != null) {
6185 // root = temp(root)
6186 // }
6187
6188 // /* GcRoot<mirror::Object> */ root = *(obj + offset)
6189 __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
6190 static_assert(
6191 sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
6192 "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
6193 "have different sizes.");
6194 static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
6195 "art::mirror::CompressedReference<mirror::Object> and int32_t "
6196 "have different sizes.");
6197
6198 // Slow path marking the GC root `root`.
6199 Location temp = Location::RegisterLocation(T9);
6200 SlowPathCodeMIPS* slow_path =
6201 new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(
6202 instruction,
6203 root,
6204 /*entrypoint*/ temp);
6205 codegen_->AddSlowPath(slow_path);
6206
6207 // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
6208 const int32_t entry_point_offset =
6209 CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(root.reg() - 1);
6210 // Loading the entrypoint does not require a load acquire since it is only changed when
6211 // threads are suspended or running a checkpoint.
6212 __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, entry_point_offset);
6213 // The entrypoint is null when the GC is not marking; this saves one load compared to
6214 // checking GetIsGcMarking.
6215 __ Bnez(temp.AsRegister<Register>(), slow_path->GetEntryLabel());
6216 __ Bind(slow_path->GetExitLabel());
6217 } else {
6218 // GC root loaded through a slow path for read barriers other
6219 // than Baker's.
6220 // /* GcRoot<mirror::Object>* */ root = obj + offset
6221 __ Addiu32(root_reg, obj, offset);
6222 // /* mirror::Object* */ root = root->Read()
6223 codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
6224 }
6225 } else {
6226 // Plain GC root load with no read barrier.
6227 // /* GcRoot<mirror::Object> */ root = *(obj + offset)
6228 __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
6229 // Note that GC roots are not affected by heap poisoning, thus we
6230 // do not have to unpoison `root_reg` here.
6231 }
6232 }
6233
6234 void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
6235 Location ref,
6236 Register obj,
6237 uint32_t offset,
6238 Location temp,
6239 bool needs_null_check) {
6240 DCHECK(kEmitCompilerReadBarrier);
6241 DCHECK(kUseBakerReadBarrier);
6242
6243 // /* HeapReference<Object> */ ref = *(obj + offset)
6244 Location no_index = Location::NoLocation();
6245 ScaleFactor no_scale_factor = TIMES_1;
6246 GenerateReferenceLoadWithBakerReadBarrier(instruction,
6247 ref,
6248 obj,
6249 offset,
6250 no_index,
6251 no_scale_factor,
6252 temp,
6253 needs_null_check);
6254 }
6255
6256 void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
6257 Location ref,
6258 Register obj,
6259 uint32_t data_offset,
6260 Location index,
6261 Location temp,
6262 bool needs_null_check) {
6263 DCHECK(kEmitCompilerReadBarrier);
6264 DCHECK(kUseBakerReadBarrier);
6265
6266 static_assert(
6267 sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
6268 "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
6269 // /* HeapReference<Object> */ ref =
6270 // *(obj + data_offset + index * sizeof(HeapReference<Object>))
6271 ScaleFactor scale_factor = TIMES_4;
6272 GenerateReferenceLoadWithBakerReadBarrier(instruction,
6273 ref,
6274 obj,
6275 data_offset,
6276 index,
6277 scale_factor,
6278 temp,
6279 needs_null_check);
6280 }
6281
6282 void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
6283 Location ref,
6284 Register obj,
6285 uint32_t offset,
6286 Location index,
6287 ScaleFactor scale_factor,
6288 Location temp,
6289 bool needs_null_check,
6290 bool always_update_field) {
6291 DCHECK(kEmitCompilerReadBarrier);
6292 DCHECK(kUseBakerReadBarrier);
6293
6294 // In slow path based read barriers, the read barrier call is
6295 // inserted after the original load. However, in fast path based
6296 // Baker's read barriers, we need to perform the load of
6297 // mirror::Object::monitor_ *before* the original reference load.
6298 // This load-load ordering is required by the read barrier.
6299 // The fast path/slow path (for Baker's algorithm) should look like:
6300 //
6301 // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
6302 // lfence; // Load fence or artificial data dependency to prevent load-load reordering
6303 // HeapReference<Object> ref = *src; // Original reference load.
6304 // bool is_gray = (rb_state == ReadBarrier::GrayState());
6305 // if (is_gray) {
6306 // ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
6307 // }
6308 //
6309 // Note: the original implementation in ReadBarrier::Barrier is
6310 // slightly more complex as it performs additional checks that we do
6311 // not do here for performance reasons.
6312
6313 Register ref_reg = ref.AsRegister<Register>();
6314 Register temp_reg = temp.AsRegister<Register>();
6315 uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
6316
6317 // /* int32_t */ monitor = obj->monitor_
6318 __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
6319 if (needs_null_check) {
6320 MaybeRecordImplicitNullCheck(instruction);
6321 }
6322 // /* LockWord */ lock_word = LockWord(monitor)
6323 static_assert(sizeof(LockWord) == sizeof(int32_t),
6324 "art::LockWord and int32_t have different sizes.");
6325
6326 __ Sync(0); // Barrier to prevent load-load reordering.
6327
6328 // The actual reference load.
6329 if (index.IsValid()) {
6330 // Load types involving an "index": ArrayGet,
6331 // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
6332 // intrinsics.
6333 // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
6334 if (index.IsConstant()) {
6335 size_t computed_offset =
6336 (index.GetConstant()->AsIntConstant()->GetValue() << scale_factor) + offset;
6337 __ LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset);
6338 } else {
6339 // Handle the special case of the
6340 // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
6341 // intrinsics, which use a register pair as index ("long
6342 // offset"), of which only the low part contains data.
6343 Register index_reg = index.IsRegisterPair()
6344 ? index.AsRegisterPairLow<Register>()
6345 : index.AsRegister<Register>();
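      // TMP = obj + (index_reg << scale_factor); the reference is then loaded
      // from TMP + offset.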
6346 __ ShiftAndAdd(TMP, index_reg, obj, scale_factor, TMP);
6347 __ LoadFromOffset(kLoadWord, ref_reg, TMP, offset);
6348 }
6349 } else {
6350 // /* HeapReference<Object> */ ref = *(obj + offset)
6351 __ LoadFromOffset(kLoadWord, ref_reg, obj, offset);
6352 }
6353
6354 // Object* ref = ref_addr->AsMirrorPtr()
6355 __ MaybeUnpoisonHeapReference(ref_reg);
6356
6357 // Slow path marking the object `ref` when it is gray.
6358 SlowPathCodeMIPS* slow_path;
6359 if (always_update_field) {
6360 // ReadBarrierMarkAndUpdateFieldSlowPathMIPS only supports address
6361 // of the form `obj + field_offset`, where `obj` is a register and
6362 // `field_offset` is a register pair (of which only the lower half
6363 // is used). Thus `offset` is expected to be zero and `scale_factor`
6364 // to be TIMES_1 in this code path (see the DCHECKs below).
6365 DCHECK_EQ(offset, 0u);
6366 DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
6367 slow_path = new (GetGraph()->GetArena())
6368 ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
6369 ref,
6370 obj,
6371 /* field_offset */ index,
6372 temp_reg);
6373 } else {
6374 slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
6375 }
6376 AddSlowPath(slow_path);
6377
6378 // if (rb_state == ReadBarrier::GrayState())
6379 // ref = ReadBarrier::Mark(ref);
6380 // Given the numeric representation, it's enough to check the low bit of the
6381 // rb_state. We do that by shifting the bit into the sign bit (31) and
6382 // performing a branch on less than zero.
6383 static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
6384 static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
6385 static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
6386 __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
6387 __ Bltz(temp_reg, slow_path->GetEntryLabel());
6388 __ Bind(slow_path->GetExitLabel());
6389 }
6390
6391 void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction,
6392 Location out,
6393 Location ref,
6394 Location obj,
6395 uint32_t offset,
6396 Location index) {
6397 DCHECK(kEmitCompilerReadBarrier);
6398
6399 // Insert a slow path based read barrier *after* the reference load.
6400 //
6401 // If heap poisoning is enabled, the unpoisoning of the loaded
6402 // reference will be carried out by the runtime within the slow
6403 // path.
6404 //
6405 // Note that `ref` currently does not get unpoisoned (when heap
6406 // poisoning is enabled), which is alright as the `ref` argument is
6407 // not used by the artReadBarrierSlow entry point.
6408 //
6409 // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
6410 SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
6411 ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
6412 AddSlowPath(slow_path);
6413
6414 __ B(slow_path->GetEntryLabel());
6415 __ Bind(slow_path->GetExitLabel());
6416 }
6417
6418 void CodeGeneratorMIPS::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
6419 Location out,
6420 Location ref,
6421 Location obj,
6422 uint32_t offset,
6423 Location index) {
6424 if (kEmitCompilerReadBarrier) {
6425 // Baker's read barriers shall be handled by the fast path
6426 // (CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier).
6427 DCHECK(!kUseBakerReadBarrier);
6428 // If heap poisoning is enabled, unpoisoning will be taken care of
6429 // by the runtime within the slow path.
6430 GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
6431 } else if (kPoisonHeapReferences) {
6432 __ UnpoisonHeapReference(out.AsRegister<Register>());
6433 }
6434 }
6435
6436 void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction,
6437 Location out,
6438 Location root) {
6439 DCHECK(kEmitCompilerReadBarrier);
6440
6441 // Insert a slow path based read barrier *after* the GC root load.
6442 //
6443 // Note that GC roots are not affected by heap poisoning, so we do
6444 // not need to do anything special for this here.
6445 SlowPathCodeMIPS* slow_path =
6446 new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
6447 AddSlowPath(slow_path);
6448
6449 __ B(slow_path->GetEntryLabel());
6450 __ Bind(slow_path->GetExitLabel());
6451 }
6452
6453 void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
6454 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
6455 TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
6456 switch (type_check_kind) {
6457 case TypeCheckKind::kExactCheck:
6458 case TypeCheckKind::kAbstractClassCheck:
6459 case TypeCheckKind::kClassHierarchyCheck:
6460 case TypeCheckKind::kArrayObjectCheck:
6461 call_kind =
6462 kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
6463 break;
6464 case TypeCheckKind::kArrayCheck:
6465 case TypeCheckKind::kUnresolvedCheck:
6466 case TypeCheckKind::kInterfaceCheck:
6467 call_kind = LocationSummary::kCallOnSlowPath;
6468 break;
6469 }
6470
6471 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
6472 locations->SetInAt(0, Location::RequiresRegister());
6473 locations->SetInAt(1, Location::RequiresRegister());
6474 // The output does overlap inputs.
6475 // Note that TypeCheckSlowPathMIPS uses this register too.
6476 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
6477 locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
6478 }
6479
6480 void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
6481 TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
6482 LocationSummary* locations = instruction->GetLocations();
6483 Location obj_loc = locations->InAt(0);
6484 Register obj = obj_loc.AsRegister<Register>();
6485 Register cls = locations->InAt(1).AsRegister<Register>();
6486 Location out_loc = locations->Out();
6487 Register out = out_loc.AsRegister<Register>();
6488 const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
6489 DCHECK_LE(num_temps, 1u);
6490 Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
6491 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
6492 uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
6493 uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
6494 uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
6495 MipsLabel done;
6496 SlowPathCodeMIPS* slow_path = nullptr;
6497
6498 // Return 0 if `obj` is null.
6499 // Avoid this check if we know `obj` is not null.
6500 if (instruction->MustDoNullCheck()) {
6501 __ Move(out, ZERO);
6502 __ Beqz(obj, &done);
6503 }
6504
6505 switch (type_check_kind) {
6506 case TypeCheckKind::kExactCheck: {
6507 // /* HeapReference<Class> */ out = obj->klass_
6508 GenerateReferenceLoadTwoRegisters(instruction,
6509 out_loc,
6510 obj_loc,
6511 class_offset,
6512 maybe_temp_loc,
6513 kCompilerReadBarrierOption);
6514 // Classes must be equal for the instanceof to succeed.
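      // Xor yields zero iff the two classes are identical; Sltiu(out, out, 1) then
      // materializes the boolean result (1 when out == 0, 0 otherwise).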
6515 __ Xor(out, out, cls);
6516 __ Sltiu(out, out, 1);
6517 break;
6518 }
6519
6520 case TypeCheckKind::kAbstractClassCheck: {
6521 // /* HeapReference<Class> */ out = obj->klass_
6522 GenerateReferenceLoadTwoRegisters(instruction,
6523 out_loc,
6524 obj_loc,
6525 class_offset,
6526 maybe_temp_loc,
6527 kCompilerReadBarrierOption);
6528 // If the class is abstract, we eagerly fetch the super class of the
6529 // object to avoid doing a comparison we know will fail.
6530 MipsLabel loop;
6531 __ Bind(&loop);
6532 // /* HeapReference<Class> */ out = out->super_class_
6533 GenerateReferenceLoadOneRegister(instruction,
6534 out_loc,
6535 super_offset,
6536 maybe_temp_loc,
6537 kCompilerReadBarrierOption);
6538 // If `out` is null, we use it for the result, and jump to `done`.
6539 __ Beqz(out, &done);
6540 __ Bne(out, cls, &loop);
6541 __ LoadConst32(out, 1);
6542 break;
6543 }
6544
6545 case TypeCheckKind::kClassHierarchyCheck: {
6546 // /* HeapReference<Class> */ out = obj->klass_
6547 GenerateReferenceLoadTwoRegisters(instruction,
6548 out_loc,
6549 obj_loc,
6550 class_offset,
6551 maybe_temp_loc,
6552 kCompilerReadBarrierOption);
6553 // Walk over the class hierarchy to find a match.
6554 MipsLabel loop, success;
6555 __ Bind(&loop);
6556 __ Beq(out, cls, &success);
6557 // /* HeapReference<Class> */ out = out->super_class_
6558 GenerateReferenceLoadOneRegister(instruction,
6559 out_loc,
6560 super_offset,
6561 maybe_temp_loc,
6562 kCompilerReadBarrierOption);
6563 __ Bnez(out, &loop);
6564 // If `out` is null, we use it for the result, and jump to `done`.
6565 __ B(&done);
6566 __ Bind(&success);
6567 __ LoadConst32(out, 1);
6568 break;
6569 }
6570
6571 case TypeCheckKind::kArrayObjectCheck: {
6572 // /* HeapReference<Class> */ out = obj->klass_
6573 GenerateReferenceLoadTwoRegisters(instruction,
6574 out_loc,
6575 obj_loc,
6576 class_offset,
6577 maybe_temp_loc,
6578 kCompilerReadBarrierOption);
6579 // Do an exact check.
6580 MipsLabel success;
6581 __ Beq(out, cls, &success);
6582 // Otherwise, we need to check that the object's class is a non-primitive array.
6583 // /* HeapReference<Class> */ out = out->component_type_
6584 GenerateReferenceLoadOneRegister(instruction,
6585 out_loc,
6586 component_offset,
6587 maybe_temp_loc,
6588 kCompilerReadBarrierOption);
6589 // If `out` is null, we use it for the result, and jump to `done`.
6590 __ Beqz(out, &done);
6591 __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
6592 static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
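      // `out` now holds the component type's primitive kind; Sltiu produces 1 exactly
      // when that kind is kPrimNot (0), i.e. the object is a non-primitive array.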
6593 __ Sltiu(out, out, 1);
6594 __ B(&done);
6595 __ Bind(&success);
6596 __ LoadConst32(out, 1);
6597 break;
6598 }
6599
6600 case TypeCheckKind::kArrayCheck: {
6601 // No read barrier since the slow path will retry upon failure.
6602 // /* HeapReference<Class> */ out = obj->klass_
6603 GenerateReferenceLoadTwoRegisters(instruction,
6604 out_loc,
6605 obj_loc,
6606 class_offset,
6607 maybe_temp_loc,
6608 kWithoutReadBarrier);
6609 DCHECK(locations->OnlyCallsOnSlowPath());
6610 slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
6611 /* is_fatal */ false);
6612 codegen_->AddSlowPath(slow_path);
6613 __ Bne(out, cls, slow_path->GetEntryLabel());
6614 __ LoadConst32(out, 1);
6615 break;
6616 }
6617
6618 case TypeCheckKind::kUnresolvedCheck:
6619 case TypeCheckKind::kInterfaceCheck: {
6620 // Note that we indeed only call on slow path, but we always go
6621 // into the slow path for the unresolved and interface check
6622 // cases.
6623 //
6624 // We cannot directly call the InstanceofNonTrivial runtime
6625 // entry point without resorting to a type checking slow path
6626 // here (i.e. by calling InvokeRuntime directly), as it would
6627 // require to assign fixed registers for the inputs of this
6628 // HInstanceOf instruction (following the runtime calling
6629 // convention), which might be cluttered by the potential first
6630 // read barrier emission at the beginning of this method.
6631 //
6632 // TODO: Introduce a new runtime entry point taking the object
6633 // to test (instead of its class) as argument, and let it deal
6634 // with the read barrier issues. This will let us refactor this
6635 // case of the `switch` code as it was previously (with a direct
6636 // call to the runtime not using a type checking slow path).
6637 // This should also be beneficial for the other cases above.
6638 DCHECK(locations->OnlyCallsOnSlowPath());
6639 slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
6640 /* is_fatal */ false);
6641 codegen_->AddSlowPath(slow_path);
6642 __ B(slow_path->GetEntryLabel());
6643 break;
6644 }
6645 }
6646
6647 __ Bind(&done);
6648
6649 if (slow_path != nullptr) {
6650 __ Bind(slow_path->GetExitLabel());
6651 }
6652 }
6653
6654 void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
6655 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
6656 locations->SetOut(Location::ConstantLocation(constant));
6657 }
6658
6659 void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
6660 // Will be generated at use site.
6661 }
6662
6663 void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
6664 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
6665 locations->SetOut(Location::ConstantLocation(constant));
6666 }
6667
6668 void InstructionCodeGeneratorMIPS::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
6669 // Will be generated at use site.
6670 }
6671
6672 void LocationsBuilderMIPS::HandleInvoke(HInvoke* invoke) {
6673 InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
6674 CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
6675 }
6676
6677 void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
6678 HandleInvoke(invoke);
6679 // The register T7 is required to hold the hidden argument passed to
6680 // art_quick_imt_conflict_trampoline, so add a fixed temp for it.
6681 invoke->GetLocations()->AddTemp(Location::RegisterLocation(T7));
6682 }
6683
6684 void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
6685 // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
6686 Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
6687 Location receiver = invoke->GetLocations()->InAt(0);
6688 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
6689 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
6690
6691 // Set the hidden argument.
6692 __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
6693 invoke->GetDexMethodIndex());
6694
6695 // temp = object->GetClass();
6696 if (receiver.IsStackSlot()) {
6697 __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
6698 __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
6699 } else {
6700 __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
6701 }
6702 codegen_->MaybeRecordImplicitNullCheck(invoke);
6703 // Instead of simply (possibly) unpoisoning `temp` here, we should
6704 // emit a read barrier for the previous class reference load.
6705 // However this is not required in practice, as this is an
6706 // intermediate/temporary reference and because the current
6707 // concurrent copying collector keeps the from-space memory
6708 // intact/accessible until the end of the marking phase (the
6709 // concurrent copying collector may not do so in the future).
6710 __ MaybeUnpoisonHeapReference(temp);
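  // Load the ImTable pointer stored in the class.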
6711 __ LoadFromOffset(kLoadWord, temp, temp,
6712 mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
6713 uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
6714 invoke->GetImtIndex(), kMipsPointerSize));
6715 // temp = temp->GetImtEntryAt(method_offset);
6716 __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
6717 // T9 = temp->GetEntryPoint();
6718 __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
6719 // T9();
6720 __ Jalr(T9);
6721 __ NopIfNoReordering();
6722 DCHECK(!codegen_->IsLeafMethod());
6723 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
6724 }
6725
6726 void LocationsBuilderMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
6727 IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
6728 if (intrinsic.TryDispatch(invoke)) {
6729 return;
6730 }
6731
6732 HandleInvoke(invoke);
6733 }
6734
6735 void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
6736 // Explicit clinit checks triggered by static invokes must have been pruned by
6737 // art::PrepareForRegisterAllocation.
6738 DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
6739
6740 bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
6741 bool has_extra_input = invoke->HasPcRelativeDexCache() && !is_r6;
6742
6743 IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
6744 if (intrinsic.TryDispatch(invoke)) {
6745 if (invoke->GetLocations()->CanCall() && has_extra_input) {
6746 invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
6747 }
6748 return;
6749 }
6750
6751 HandleInvoke(invoke);
6752
6753 // Add the extra input register if either the dex cache array base register
6754 // or the PC-relative base register for accessing literals is needed.
6755 if (has_extra_input) {
6756 invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
6757 }
6758 }
6759
6760 void LocationsBuilderMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
6761 HandleInvoke(invoke);
6762 }
6763
6764 void InstructionCodeGeneratorMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
6765 codegen_->GenerateInvokePolymorphicCall(invoke);
6766 }
6767
6768 static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
6769 if (invoke->GetLocations()->Intrinsified()) {
6770 IntrinsicCodeGeneratorMIPS intrinsic(codegen);
6771 intrinsic.Dispatch(invoke);
6772 return true;
6773 }
6774 return false;
6775 }
6776
6777 HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
6778 HLoadString::LoadKind desired_string_load_kind) {
6779 // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
6780 // is incompatible with it.
6781 // TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
6782 // with irreducible loops.
6783 bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
6784 bool is_r6 = GetInstructionSetFeatures().IsR6();
6785 bool fallback_load = has_irreducible_loops && !is_r6;
6786 switch (desired_string_load_kind) {
6787 case HLoadString::LoadKind::kBootImageLinkTimeAddress:
6788 DCHECK(!GetCompilerOptions().GetCompilePic());
6789 break;
6790 case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
6791 DCHECK(GetCompilerOptions().GetCompilePic());
6792 break;
6793 case HLoadString::LoadKind::kBootImageAddress:
6794 break;
6795 case HLoadString::LoadKind::kBssEntry:
6796 DCHECK(!Runtime::Current()->UseJitCompilation());
6797 break;
6798 case HLoadString::LoadKind::kJitTableAddress:
6799 DCHECK(Runtime::Current()->UseJitCompilation());
6800 fallback_load = false;
6801 break;
6802 case HLoadString::LoadKind::kDexCacheViaMethod:
6803 fallback_load = false;
6804 break;
6805 }
6806 if (fallback_load) {
6807 desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
6808 }
6809 return desired_string_load_kind;
6810 }
6811
6812 HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
6813 HLoadClass::LoadKind desired_class_load_kind) {
6814 // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
6815 // is incompatible with it.
6816 bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
6817 bool is_r6 = GetInstructionSetFeatures().IsR6();
6818 bool fallback_load = has_irreducible_loops && !is_r6;
6819 switch (desired_class_load_kind) {
6820 case HLoadClass::LoadKind::kInvalid:
6821 LOG(FATAL) << "UNREACHABLE";
6822 UNREACHABLE();
6823 case HLoadClass::LoadKind::kReferrersClass:
6824 fallback_load = false;
6825 break;
6826 case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
6827 DCHECK(!GetCompilerOptions().GetCompilePic());
6828 break;
6829 case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
6830 DCHECK(GetCompilerOptions().GetCompilePic());
6831 break;
6832 case HLoadClass::LoadKind::kBootImageAddress:
6833 break;
6834 case HLoadClass::LoadKind::kBssEntry:
6835 DCHECK(!Runtime::Current()->UseJitCompilation());
6836 break;
6837 case HLoadClass::LoadKind::kJitTableAddress:
6838 DCHECK(Runtime::Current()->UseJitCompilation());
6839 fallback_load = false;
6840 break;
6841 case HLoadClass::LoadKind::kDexCacheViaMethod:
6842 fallback_load = false;
6843 break;
6844 }
6845 if (fallback_load) {
6846 desired_class_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
6847 }
6848 return desired_class_load_kind;
6849 }
6850
6851 Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
6852 Register temp) {
6853 CHECK(!GetInstructionSetFeatures().IsR6());
6854 CHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
6855 Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
6856 if (!invoke->GetLocations()->Intrinsified()) {
6857 return location.AsRegister<Register>();
6858 }
6859 // For intrinsics we allow any location, so it may be on the stack.
6860 if (!location.IsRegister()) {
6861 __ LoadFromOffset(kLoadWord, temp, SP, location.GetStackIndex());
6862 return temp;
6863 }
6864 // For register locations, check if the register was saved. If so, get it from the stack.
6865 // Note: There is a chance that the register was saved but not overwritten, so we could
6866 // save one load. However, since this is just an intrinsic slow path, we prefer this
6867 // simple and more robust approach rather than trying to determine if that's the case.
6868 SlowPathCode* slow_path = GetCurrentSlowPath();
6869 DCHECK(slow_path != nullptr); // For intrinsified invokes the call is emitted on the slow path.
6870 if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
6871 int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
6872 __ LoadFromOffset(kLoadWord, temp, SP, stack_offset);
6873 return temp;
6874 }
6875 return location.AsRegister<Register>();
6876 }
6877
6878 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
6879 const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
6880 HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
6881 HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
6882 // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
6883 // is incompatible with it.
6884 bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
6885 bool is_r6 = GetInstructionSetFeatures().IsR6();
6886 bool fallback_load = has_irreducible_loops && !is_r6;
6887 switch (dispatch_info.method_load_kind) {
6888 case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
6889 break;
6890 default:
6891 fallback_load = false;
6892 break;
6893 }
6894 if (fallback_load) {
6895 dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
6896 dispatch_info.method_load_data = 0;
6897 }
6898 return dispatch_info;
6899 }
6900
6901 void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
6902 // All registers are assumed to be correctly set up per the calling convention.
6903 Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
6904 HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
6905 HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
6906 bool is_r6 = GetInstructionSetFeatures().IsR6();
6907 Register base_reg = (invoke->HasPcRelativeDexCache() && !is_r6)
6908 ? GetInvokeStaticOrDirectExtraParameter(invoke, temp.AsRegister<Register>())
6909 : ZERO;
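  // On pre-R6, `base_reg` is the extra input holding the dex cache arrays base; R6 uses
  // PC-relative addressing directly, so ZERO stands in as an unused placeholder.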
6910
6911 switch (method_load_kind) {
6912 case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
6913 // temp = thread->string_init_entrypoint
6914 uint32_t offset =
6915 GetThreadOffset<kMipsPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
6916 __ LoadFromOffset(kLoadWord,
6917 temp.AsRegister<Register>(),
6918 TR,
6919 offset);
6920 break;
6921 }
6922 case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
6923 callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
6924 break;
6925 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
6926 __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
6927 break;
6928 case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
6929 if (is_r6) {
6930 uint32_t offset = invoke->GetDexCacheArrayOffset();
6931 CodeGeneratorMIPS::PcRelativePatchInfo* info =
6932 NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset);
6933 bool reordering = __ SetReorder(false);
6934 EmitPcRelativeAddressPlaceholderHigh(info, TMP, ZERO);
6935 __ Lw(temp.AsRegister<Register>(), TMP, /* placeholder */ 0x5678);
6936 __ SetReorder(reordering);
6937 } else {
6938 HMipsDexCacheArraysBase* base =
6939 invoke->InputAt(invoke->GetSpecialInputIndex())->AsMipsDexCacheArraysBase();
6940 int32_t offset =
6941 invoke->GetDexCacheArrayOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
6942 __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), base_reg, offset);
6943 }
6944 break;
6945 case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
6946 Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
6947 Register reg = temp.AsRegister<Register>();
6948 Register method_reg;
6949 if (current_method.IsRegister()) {
6950 method_reg = current_method.AsRegister<Register>();
6951 } else {
6952 // TODO: use the appropriate DCHECK() here if possible.
6953 // DCHECK(invoke->GetLocations()->Intrinsified());
6954 DCHECK(!current_method.IsValid());
6955 method_reg = reg;
6956 __ Lw(reg, SP, kCurrentMethodStackOffset);
6957 }
6958
6959 // temp = temp->dex_cache_resolved_methods_;
6960 __ LoadFromOffset(kLoadWord,
6961 reg,
6962 method_reg,
6963 ArtMethod::DexCacheResolvedMethodsOffset(kMipsPointerSize).Int32Value());
6964 // temp = temp[index_in_cache];
6965 // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
6966 uint32_t index_in_cache = invoke->GetDexMethodIndex();
6967 __ LoadFromOffset(kLoadWord,
6968 reg,
6969 reg,
6970 CodeGenerator::GetCachePointerOffset(index_in_cache));
6971 break;
6972 }
6973 }
6974
6975 switch (code_ptr_location) {
6976 case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
6977 __ Bal(&frame_entry_label_);
6978 break;
6979 case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
6980 // T9 = callee_method->entry_point_from_quick_compiled_code_;
6981 __ LoadFromOffset(kLoadWord,
6982 T9,
6983 callee_method.AsRegister<Register>(),
6984 ArtMethod::EntryPointFromQuickCompiledCodeOffset(
6985 kMipsPointerSize).Int32Value());
6986 // T9()
6987 __ Jalr(T9);
6988 __ NopIfNoReordering();
6989 break;
6990 }
6991 DCHECK(!IsLeafMethod());
6992 }
6993
6994 void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
6995 // Explicit clinit checks triggered by static invokes must have been pruned by
6996 // art::PrepareForRegisterAllocation.
6997 DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
6998
6999 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
7000 return;
7001 }
7002
7003 LocationSummary* locations = invoke->GetLocations();
7004 codegen_->GenerateStaticOrDirectCall(invoke,
7005 locations->HasTemps()
7006 ? locations->GetTemp(0)
7007 : Location::NoLocation());
7008 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
7009 }
7010
7011 void CodeGeneratorMIPS::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
7012 // Use the calling convention instead of the location of the receiver, as
7013 // intrinsics may have put the receiver in a different register. In the intrinsics
7014 // slow path, the arguments have been moved to the right place, so here we are
7015 // guaranteed that the receiver is the first register of the calling convention.
7016 InvokeDexCallingConvention calling_convention;
7017 Register receiver = calling_convention.GetRegisterAt(0);
7018
7019 Register temp = temp_location.AsRegister<Register>();
7020 size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
7021 invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
7022 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
7023 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
7024
7025 // temp = object->GetClass();
7026 __ LoadFromOffset(kLoadWord, temp, receiver, class_offset);
7027 MaybeRecordImplicitNullCheck(invoke);
7028 // Instead of simply (possibly) unpoisoning `temp` here, we should
7029 // emit a read barrier for the previous class reference load.
7030 // However this is not required in practice, as this is an
7031 // intermediate/temporary reference and because the current
7032 // concurrent copying collector keeps the from-space memory
7033 // intact/accessible until the end of the marking phase (the
7034 // concurrent copying collector may not do so in the future).
7035 __ MaybeUnpoisonHeapReference(temp);
7036 // temp = temp->GetMethodAt(method_offset);
7037 __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
7038 // T9 = temp->GetEntryPoint();
7039 __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
7040 // T9();
7041 __ Jalr(T9);
7042 __ NopIfNoReordering();
7043 }
7044
7045 void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
7046 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
7047 return;
7048 }
7049
7050 codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
7051 DCHECK(!codegen_->IsLeafMethod());
7052 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
7053 }
7054
7055 void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
7056 HLoadClass::LoadKind load_kind = cls->GetLoadKind();
7057 if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
7058 InvokeRuntimeCallingConvention calling_convention;
7059 CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
7060 cls,
7061 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
7062 calling_convention.GetReturnLocation(Primitive::kPrimNot));
7063 return;
7064 }
7065 DCHECK(!cls->NeedsAccessCheck());
7066
7067 const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
7068 LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
7069 ? LocationSummary::kCallOnSlowPath
7070 : LocationSummary::kNoCall;
7071 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
7072 switch (load_kind) {
7073 // We need an extra register for PC-relative literals on R2.
7074 case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
7075 case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
7076 case HLoadClass::LoadKind::kBootImageAddress:
7077 case HLoadClass::LoadKind::kBssEntry:
7078 if (codegen_->GetInstructionSetFeatures().IsR6()) {
7079 break;
7080 }
7081 FALLTHROUGH_INTENDED;
7082 case HLoadClass::LoadKind::kReferrersClass:
7083 locations->SetInAt(0, Location::RequiresRegister());
7084 break;
7085 default:
7086 break;
7087 }
7088 locations->SetOut(Location::RequiresRegister());
7089 }
7090
7091 // NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
7092 // move.
7093 void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
7094 HLoadClass::LoadKind load_kind = cls->GetLoadKind();
7095 if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
7096 codegen_->GenerateLoadClassRuntimeCall(cls);
7097 return;
7098 }
7099 DCHECK(!cls->NeedsAccessCheck());
7100
7101 LocationSummary* locations = cls->GetLocations();
7102 Location out_loc = locations->Out();
7103 Register out = out_loc.AsRegister<Register>();
7104 Register base_or_current_method_reg;
7105 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
7106 switch (load_kind) {
7107 // We need an extra register for PC-relative literals on R2.
7108 case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
7109 case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
7110 case HLoadClass::LoadKind::kBootImageAddress:
7111 case HLoadClass::LoadKind::kBssEntry:
7112 base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
7113 break;
7114 case HLoadClass::LoadKind::kReferrersClass:
7115 case HLoadClass::LoadKind::kDexCacheViaMethod:
7116 base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
7117 break;
7118 default:
7119 base_or_current_method_reg = ZERO;
7120 break;
7121 }
7122
7123 const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
7124 ? kWithoutReadBarrier
7125 : kCompilerReadBarrierOption;
7126 bool generate_null_check = false;
7127 switch (load_kind) {
7128 case HLoadClass::LoadKind::kReferrersClass: {
7129 DCHECK(!cls->CanCallRuntime());
7130 DCHECK(!cls->MustGenerateClinitCheck());
7131 // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
7132 GenerateGcRootFieldLoad(cls,
7133 out_loc,
7134 base_or_current_method_reg,
7135 ArtMethod::DeclaringClassOffset().Int32Value(),
7136 read_barrier_option);
7137 break;
7138 }
7139 case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
7140 DCHECK(codegen_->GetCompilerOptions().IsBootImage());
7141 DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
7142 __ LoadLiteral(out,
7143 base_or_current_method_reg,
7144 codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
7145 cls->GetTypeIndex()));
7146 break;
7147 case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
7148 DCHECK(codegen_->GetCompilerOptions().IsBootImage());
7149 DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
7150 CodeGeneratorMIPS::PcRelativePatchInfo* info =
7151 codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
7152 bool reordering = __ SetReorder(false);
7153 codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
7154 __ Addiu(out, out, /* placeholder */ 0x5678);
7155 __ SetReorder(reordering);
7156 break;
7157 }
7158 case HLoadClass::LoadKind::kBootImageAddress: {
7159 DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
7160 uint32_t address = dchecked_integral_cast<uint32_t>(
7161 reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
7162 DCHECK_NE(address, 0u);
7163 __ LoadLiteral(out,
7164 base_or_current_method_reg,
7165 codegen_->DeduplicateBootImageAddressLiteral(address));
7166 break;
7167 }
7168 case HLoadClass::LoadKind::kBssEntry: {
7169 CodeGeneratorMIPS::PcRelativePatchInfo* info =
7170 codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
7171 bool reordering = __ SetReorder(false);
7172 codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
7173 GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678, read_barrier_option);
7174 __ SetReorder(reordering);
7175 generate_null_check = true;
7176 break;
7177 }
7178 case HLoadClass::LoadKind::kJitTableAddress: {
7179 CodeGeneratorMIPS::JitPatchInfo* info = codegen_->NewJitRootClassPatch(cls->GetDexFile(),
7180 cls->GetTypeIndex(),
7181 cls->GetClass());
7182 bool reordering = __ SetReorder(false);
7183 __ Bind(&info->high_label);
7184 __ Lui(out, /* placeholder */ 0x1234);
7185 GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678, read_barrier_option);
7186 __ SetReorder(reordering);
7187 break;
7188 }
7189 case HLoadClass::LoadKind::kDexCacheViaMethod:
7190 case HLoadClass::LoadKind::kInvalid:
7191 LOG(FATAL) << "UNREACHABLE";
7192 UNREACHABLE();
7193 }
7194
7195 if (generate_null_check || cls->MustGenerateClinitCheck()) {
7196 DCHECK(cls->CanCallRuntime());
7197 SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
7198 cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
7199 codegen_->AddSlowPath(slow_path);
7200 if (generate_null_check) {
7201 __ Beqz(out, slow_path->GetEntryLabel());
7202 }
7203 if (cls->MustGenerateClinitCheck()) {
7204 GenerateClassInitializationCheck(slow_path, out);
7205 } else {
7206 __ Bind(slow_path->GetExitLabel());
7207 }
7208 }
7209 }
7210
7211 static int32_t GetExceptionTlsOffset() {
7212 return Thread::ExceptionOffset<kMipsPointerSize>().Int32Value();
7213 }
7214
7215 void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
7216 LocationSummary* locations =
7217 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
7218 locations->SetOut(Location::RequiresRegister());
7219 }
7220
7221 void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) {
7222 Register out = load->GetLocations()->Out().AsRegister<Register>();
7223 __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
7224 }
7225
7226 void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
7227 new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
7228 }
7229
7230 void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
7231 __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
7232 }
7233
7234 void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
7235 LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
7236 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
7237 HLoadString::LoadKind load_kind = load->GetLoadKind();
7238 switch (load_kind) {
7239 // We need an extra register for PC-relative literals on R2.
7240 case HLoadString::LoadKind::kBootImageLinkTimeAddress:
7241 case HLoadString::LoadKind::kBootImageAddress:
7242 case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
7243 case HLoadString::LoadKind::kBssEntry:
7244 if (codegen_->GetInstructionSetFeatures().IsR6()) {
7245 break;
7246 }
7247 FALLTHROUGH_INTENDED;
7248 // We need an extra register for PC-relative dex cache accesses.
7249 case HLoadString::LoadKind::kDexCacheViaMethod:
7250 locations->SetInAt(0, Location::RequiresRegister());
7251 break;
7252 default:
7253 break;
7254 }
7255 if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
7256 InvokeRuntimeCallingConvention calling_convention;
7257 locations->SetOut(calling_convention.GetReturnLocation(load->GetType()));
7258 } else {
7259 locations->SetOut(Location::RequiresRegister());
7260 }
7261 }
7262
7263 // NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
7264 // move.
7265 void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
7266 HLoadString::LoadKind load_kind = load->GetLoadKind();
7267 LocationSummary* locations = load->GetLocations();
7268 Location out_loc = locations->Out();
7269 Register out = out_loc.AsRegister<Register>();
7270 Register base_or_current_method_reg;
7271 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
7272 switch (load_kind) {
7273 // We need an extra register for PC-relative literals on R2.
7274 case HLoadString::LoadKind::kBootImageLinkTimeAddress:
7275 case HLoadString::LoadKind::kBootImageAddress:
7276 case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
7277 case HLoadString::LoadKind::kBssEntry:
7278 base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
7279 break;
7280 default:
7281 base_or_current_method_reg = ZERO;
7282 break;
7283 }
7284
7285 switch (load_kind) {
7286 case HLoadString::LoadKind::kBootImageLinkTimeAddress:
7287 DCHECK(codegen_->GetCompilerOptions().IsBootImage());
7288 __ LoadLiteral(out,
7289 base_or_current_method_reg,
7290 codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
7291 load->GetStringIndex()));
7292 return; // No dex cache slow path.
7293 case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
7294 DCHECK(codegen_->GetCompilerOptions().IsBootImage());
7295 CodeGeneratorMIPS::PcRelativePatchInfo* info =
7296 codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
7297 bool reordering = __ SetReorder(false);
7298 codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
7299 __ Addiu(out, out, /* placeholder */ 0x5678);
7300 __ SetReorder(reordering);
7301 return; // No dex cache slow path.
7302 }
7303 case HLoadString::LoadKind::kBootImageAddress: {
7304 uint32_t address = dchecked_integral_cast<uint32_t>(
7305 reinterpret_cast<uintptr_t>(load->GetString().Get()));
7306 DCHECK_NE(address, 0u);
7307 __ LoadLiteral(out,
7308 base_or_current_method_reg,
7309 codegen_->DeduplicateBootImageAddressLiteral(address));
7310 return; // No dex cache slow path.
7311 }
7312 case HLoadString::LoadKind::kBssEntry: {
7313 DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
7314 CodeGeneratorMIPS::PcRelativePatchInfo* info =
7315 codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
7316 bool reordering = __ SetReorder(false);
7317 codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
7318 GenerateGcRootFieldLoad(load,
7319 out_loc,
7320 out,
7321 /* placeholder */ 0x5678,
7322 kCompilerReadBarrierOption);
7323 __ SetReorder(reordering);
7324 SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
7325 codegen_->AddSlowPath(slow_path);
7326 __ Beqz(out, slow_path->GetEntryLabel());
7327 __ Bind(slow_path->GetExitLabel());
7328 return;
7329 }
7330 case HLoadString::LoadKind::kJitTableAddress: {
7331 CodeGeneratorMIPS::JitPatchInfo* info =
7332 codegen_->NewJitRootStringPatch(load->GetDexFile(),
7333 load->GetStringIndex(),
7334 load->GetString());
7335 bool reordering = __ SetReorder(false);
7336 __ Bind(&info->high_label);
7337 __ Lui(out, /* placeholder */ 0x1234);
7338 GenerateGcRootFieldLoad(load,
7339 out_loc,
7340 out,
7341 /* placeholder */ 0x5678,
7342 kCompilerReadBarrierOption);
7343 __ SetReorder(reordering);
7344 return;
7345 }
7346 default:
7347 break;
7348 }
7349
7350 // TODO: Re-add the compiler code to do the string dex cache lookup.
7351 DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
7352 InvokeRuntimeCallingConvention calling_convention;
7353 __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
7354 codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
7355 CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
7356 }
7357
7358 void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
7359 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
7360 locations->SetOut(Location::ConstantLocation(constant));
7361 }
7362
7363 void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
7364 // Will be generated at use site.
7365 }
7366
7367 void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
7368 LocationSummary* locations =
7369 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
7370 InvokeRuntimeCallingConvention calling_convention;
7371 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
7372 }
7373
7374 void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
7375 if (instruction->IsEnter()) {
7376 codegen_->InvokeRuntime(kQuickLockObject, instruction, instruction->GetDexPc());
7377 CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
7378 } else {
7379 codegen_->InvokeRuntime(kQuickUnlockObject, instruction, instruction->GetDexPc());
7380     CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
7381   }
7382 }
7383
7384 void LocationsBuilderMIPS::VisitMul(HMul* mul) {
7385 LocationSummary* locations =
7386 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
7387 switch (mul->GetResultType()) {
7388 case Primitive::kPrimInt:
7389 case Primitive::kPrimLong:
7390 locations->SetInAt(0, Location::RequiresRegister());
7391 locations->SetInAt(1, Location::RequiresRegister());
7392 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
7393 break;
7394
7395 case Primitive::kPrimFloat:
7396 case Primitive::kPrimDouble:
7397 locations->SetInAt(0, Location::RequiresFpuRegister());
7398 locations->SetInAt(1, Location::RequiresFpuRegister());
7399 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
7400 break;
7401
7402 default:
7403 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
7404 }
7405 }
7406
7407 void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) {
7408 Primitive::Type type = instruction->GetType();
7409 LocationSummary* locations = instruction->GetLocations();
7410 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
7411
7412 switch (type) {
7413 case Primitive::kPrimInt: {
7414 Register dst = locations->Out().AsRegister<Register>();
7415 Register lhs = locations->InAt(0).AsRegister<Register>();
7416 Register rhs = locations->InAt(1).AsRegister<Register>();
7417
7418 if (isR6) {
7419 __ MulR6(dst, lhs, rhs);
7420 } else {
7421 __ MulR2(dst, lhs, rhs);
7422 }
7423 break;
7424 }
7425 case Primitive::kPrimLong: {
7426 Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
7427 Register dst_low = locations->Out().AsRegisterPairLow<Register>();
7428 Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
7429 Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
7430 Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
7431 Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
7432
7433       // Extra checks are needed because register pairs such as A1_A2 exist.
7434       // The algorithm is wrong if dst_high is either lhs_low or rhs_low
7435       // (e.g. lhs=a0_a1, rhs=a2_a3 and dst=a1_a2).
7436 DCHECK_NE(dst_high, lhs_low);
7437 DCHECK_NE(dst_high, rhs_low);
7438
7439 // A_B * C_D
7440 // dst_hi: [ low(A*D) + low(B*C) + hi(B*D) ]
7441 // dst_lo: [ low(B*D) ]
7442       // Note: R2 and R6 MUL produce the low 32 bits of the multiplication result.
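// For illustration: (A*2^32 + B) * (C*2^32 + D) = A*C*2^64 + (A*D + B*C)*2^32 + B*D, and the
// A*C term lies entirely above bit 63, so only the sums written above survive modulo 2^64.
// E.g. 0x00000001_00000002 * 0x00000003_00000004 gives dst_hi = 1*4 + 2*3 + 0 = 0xA and
// dst_lo = 0x8.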
7443
7444 if (isR6) {
7445 __ MulR6(TMP, lhs_high, rhs_low);
7446 __ MulR6(dst_high, lhs_low, rhs_high);
7447 __ Addu(dst_high, dst_high, TMP);
7448 __ MuhuR6(TMP, lhs_low, rhs_low);
7449 __ Addu(dst_high, dst_high, TMP);
7450 __ MulR6(dst_low, lhs_low, rhs_low);
7451 } else {
7452 __ MulR2(TMP, lhs_high, rhs_low);
7453 __ MulR2(dst_high, lhs_low, rhs_high);
7454 __ Addu(dst_high, dst_high, TMP);
7455 __ MultuR2(lhs_low, rhs_low);
7456 __ Mfhi(TMP);
7457 __ Addu(dst_high, dst_high, TMP);
7458 __ Mflo(dst_low);
7459 }
7460 break;
7461 }
7462 case Primitive::kPrimFloat:
7463 case Primitive::kPrimDouble: {
7464 FRegister dst = locations->Out().AsFpuRegister<FRegister>();
7465 FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
7466 FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
7467 if (type == Primitive::kPrimFloat) {
7468 __ MulS(dst, lhs, rhs);
7469 } else {
7470 __ MulD(dst, lhs, rhs);
7471 }
7472 break;
7473 }
7474 default:
7475 LOG(FATAL) << "Unexpected mul type " << type;
7476 }
7477 }
7478
7479 void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
7480 LocationSummary* locations =
7481 new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
7482 switch (neg->GetResultType()) {
7483 case Primitive::kPrimInt:
7484 case Primitive::kPrimLong:
7485 locations->SetInAt(0, Location::RequiresRegister());
7486 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
7487 break;
7488
7489 case Primitive::kPrimFloat:
7490 case Primitive::kPrimDouble:
7491 locations->SetInAt(0, Location::RequiresFpuRegister());
7492 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
7493 break;
7494
7495 default:
7496 LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
7497 }
7498 }
7499
7500 void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) {
7501 Primitive::Type type = instruction->GetType();
7502 LocationSummary* locations = instruction->GetLocations();
7503
7504 switch (type) {
7505 case Primitive::kPrimInt: {
7506 Register dst = locations->Out().AsRegister<Register>();
7507 Register src = locations->InAt(0).AsRegister<Register>();
7508 __ Subu(dst, ZERO, src);
7509 break;
7510 }
7511 case Primitive::kPrimLong: {
7512 Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
7513 Register dst_low = locations->Out().AsRegisterPairLow<Register>();
7514 Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
7515 Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
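// Negation is computed as 0 - src: negate the low word first, use Sltu to materialize the
// borrow (1 iff the negated low word is non-zero), then subtract the borrow from the negated
// high word.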
7516 __ Subu(dst_low, ZERO, src_low);
7517 __ Sltu(TMP, ZERO, dst_low);
7518 __ Subu(dst_high, ZERO, src_high);
7519 __ Subu(dst_high, dst_high, TMP);
7520 break;
7521 }
7522 case Primitive::kPrimFloat:
7523 case Primitive::kPrimDouble: {
7524 FRegister dst = locations->Out().AsFpuRegister<FRegister>();
7525 FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
7526 if (type == Primitive::kPrimFloat) {
7527 __ NegS(dst, src);
7528 } else {
7529 __ NegD(dst, src);
7530 }
7531 break;
7532 }
7533 default:
7534 LOG(FATAL) << "Unexpected neg type " << type;
7535 }
7536 }
7537
7538 void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
7539 LocationSummary* locations =
7540 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
7541 InvokeRuntimeCallingConvention calling_convention;
7542 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
7543 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
7544 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
7545 }
7546
7547 void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
7548 // Note: if heap poisoning is enabled, the entry point takes care
7549 // of poisoning the reference.
7550 codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
7551 CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
7552 }
7553
7554 void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
7555 LocationSummary* locations =
7556 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
7557 InvokeRuntimeCallingConvention calling_convention;
7558 if (instruction->IsStringAlloc()) {
7559 locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
7560 } else {
7561 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
7562 }
7563 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
7564 }
7565
7566 void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
7567 // Note: if heap poisoning is enabled, the entry point takes care
7568 // of poisoning the reference.
7569 if (instruction->IsStringAlloc()) {
7570 // String is allocated through StringFactory. Call NewEmptyString entry point.
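// The thread-local pNewEmptyString slot holds the StringFactory ArtMethod*; load it, then
// load that method's quick-compiled entry point and call it through T9 (the register the
// MIPS o32 PIC convention expects to hold the callee's address).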
7571 Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
7572 MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
7573 __ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
7574 __ LoadFromOffset(kLoadWord, T9, temp, code_offset.Int32Value());
7575 __ Jalr(T9);
7576 __ NopIfNoReordering();
7577 codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
7578 } else {
7579 codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
7580 CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
7581 }
7582 }
7583
7584 void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
7585 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
7586 locations->SetInAt(0, Location::RequiresRegister());
7587 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
7588 }
7589
7590 void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) {
7591 Primitive::Type type = instruction->GetType();
7592 LocationSummary* locations = instruction->GetLocations();
7593
7594 switch (type) {
7595 case Primitive::kPrimInt: {
7596 Register dst = locations->Out().AsRegister<Register>();
7597 Register src = locations->InAt(0).AsRegister<Register>();
7598 __ Nor(dst, src, ZERO);
7599 break;
7600 }
7601
7602 case Primitive::kPrimLong: {
7603 Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
7604 Register dst_low = locations->Out().AsRegisterPairLow<Register>();
7605 Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
7606 Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
7607 __ Nor(dst_high, src_high, ZERO);
7608 __ Nor(dst_low, src_low, ZERO);
7609 break;
7610 }
7611
7612 default:
7613 LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
7614 }
7615 }
7616
7617 void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
7618 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
7619 locations->SetInAt(0, Location::RequiresRegister());
7620 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
7621 }
7622
7623 void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
7624 LocationSummary* locations = instruction->GetLocations();
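// Booleans are materialized as 0 or 1, so XOR-ing with immediate 1 flips the value.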
7625 __ Xori(locations->Out().AsRegister<Register>(),
7626 locations->InAt(0).AsRegister<Register>(),
7627 1);
7628 }
7629
7630 void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
7631 LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
7632 locations->SetInAt(0, Location::RequiresRegister());
7633 }
7634
7635 void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
7636 if (CanMoveNullCheckToUser(instruction)) {
7637 return;
7638 }
7639 Location obj = instruction->GetLocations()->InAt(0);
7640
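// The loaded value goes into the ZERO register and is discarded; the load exists only to
// fault on a null object, which the runtime's fault handler converts into a
// NullPointerException at the PC recorded below.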
7641 __ Lw(ZERO, obj.AsRegister<Register>(), 0);
7642 RecordPcInfo(instruction, instruction->GetDexPc());
7643 }
7644
7645 void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
7646 SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);
7647 AddSlowPath(slow_path);
7648
7649 Location obj = instruction->GetLocations()->InAt(0);
7650
7651 __ Beqz(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
7652 }
7653
7654 void InstructionCodeGeneratorMIPS::VisitNullCheck(HNullCheck* instruction) {
7655 codegen_->GenerateNullCheck(instruction);
7656 }
7657
7658 void LocationsBuilderMIPS::VisitOr(HOr* instruction) {
7659 HandleBinaryOp(instruction);
7660 }
7661
7662 void InstructionCodeGeneratorMIPS::VisitOr(HOr* instruction) {
7663 HandleBinaryOp(instruction);
7664 }
7665
7666 void LocationsBuilderMIPS::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
7667 LOG(FATAL) << "Unreachable";
7668 }
7669
7670 void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
7671 codegen_->GetMoveResolver()->EmitNativeCode(instruction);
7672 }
7673
7674 void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
7675 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
7676 Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
7677 if (location.IsStackSlot()) {
7678 location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
7679 } else if (location.IsDoubleStackSlot()) {
7680 location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
7681 }
7682 locations->SetOut(location);
7683 }
7684
7685 void InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruction
7686 ATTRIBUTE_UNUSED) {
7687 // Nothing to do, the parameter is already at its location.
7688 }
7689
7690 void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
7691 LocationSummary* locations =
7692 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
7693 locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
7694 }
7695
7696 void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instruction
7697 ATTRIBUTE_UNUSED) {
7698 // Nothing to do, the method is already at its location.
7699 }
7700
7701 void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
7702 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
7703 for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
7704 locations->SetInAt(i, Location::Any());
7705 }
7706 locations->SetOut(Location::Any());
7707 }
7708
7709 void InstructionCodeGeneratorMIPS::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
7710 LOG(FATAL) << "Unreachable";
7711 }
7712
7713 void LocationsBuilderMIPS::VisitRem(HRem* rem) {
7714 Primitive::Type type = rem->GetResultType();
7715 LocationSummary::CallKind call_kind =
7716 (type == Primitive::kPrimInt) ? LocationSummary::kNoCall : LocationSummary::kCallOnMainOnly;
7717 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
7718
7719 switch (type) {
7720 case Primitive::kPrimInt:
7721 locations->SetInAt(0, Location::RequiresRegister());
7722 locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
7723 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
7724 break;
7725
7726 case Primitive::kPrimLong: {
7727 InvokeRuntimeCallingConvention calling_convention;
7728 locations->SetInAt(0, Location::RegisterPairLocation(
7729 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
7730 locations->SetInAt(1, Location::RegisterPairLocation(
7731 calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
7732 locations->SetOut(calling_convention.GetReturnLocation(type));
7733 break;
7734 }
7735
7736 case Primitive::kPrimFloat:
7737 case Primitive::kPrimDouble: {
7738 InvokeRuntimeCallingConvention calling_convention;
7739 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
7740 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
7741 locations->SetOut(calling_convention.GetReturnLocation(type));
7742 break;
7743 }
7744
7745 default:
7746 LOG(FATAL) << "Unexpected rem type " << type;
7747 }
7748 }
7749
7750 void InstructionCodeGeneratorMIPS::VisitRem(HRem* instruction) {
7751 Primitive::Type type = instruction->GetType();
7752
7753 switch (type) {
7754 case Primitive::kPrimInt:
7755 GenerateDivRemIntegral(instruction);
7756 break;
7757 case Primitive::kPrimLong: {
7758 codegen_->InvokeRuntime(kQuickLmod, instruction, instruction->GetDexPc());
7759 CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
7760 break;
7761 }
7762 case Primitive::kPrimFloat: {
7763 codegen_->InvokeRuntime(kQuickFmodf, instruction, instruction->GetDexPc());
7764 CheckEntrypointTypes<kQuickFmodf, float, float, float>();
7765 break;
7766 }
7767 case Primitive::kPrimDouble: {
7768 codegen_->InvokeRuntime(kQuickFmod, instruction, instruction->GetDexPc());
7769 CheckEntrypointTypes<kQuickFmod, double, double, double>();
7770 break;
7771 }
7772 default:
7773 LOG(FATAL) << "Unexpected rem type " << type;
7774 }
7775 }
7776
7777 void LocationsBuilderMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
7778 memory_barrier->SetLocations(nullptr);
7779 }
7780
7781 void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
7782 GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
7783 }
7784
7785 void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
7786 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
7787 Primitive::Type return_type = ret->InputAt(0)->GetType();
7788 locations->SetInAt(0, MipsReturnLocation(return_type));
7789 }
7790
7791 void InstructionCodeGeneratorMIPS::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
7792 codegen_->GenerateFrameExit();
7793 }
7794
7795 void LocationsBuilderMIPS::VisitReturnVoid(HReturnVoid* ret) {
7796 ret->SetLocations(nullptr);
7797 }
7798
7799 void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
7800 codegen_->GenerateFrameExit();
7801 }
7802
7803 void LocationsBuilderMIPS::VisitRor(HRor* ror) {
7804 HandleShift(ror);
7805 }
7806
7807 void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror) {
7808 HandleShift(ror);
7809 }
7810
7811 void LocationsBuilderMIPS::VisitShl(HShl* shl) {
7812 HandleShift(shl);
7813 }
7814
7815 void InstructionCodeGeneratorMIPS::VisitShl(HShl* shl) {
7816 HandleShift(shl);
7817 }
7818
7819 void LocationsBuilderMIPS::VisitShr(HShr* shr) {
7820 HandleShift(shr);
7821 }
7822
7823 void InstructionCodeGeneratorMIPS::VisitShr(HShr* shr) {
7824 HandleShift(shr);
7825 }
7826
7827 void LocationsBuilderMIPS::VisitSub(HSub* instruction) {
7828 HandleBinaryOp(instruction);
7829 }
7830
7831 void InstructionCodeGeneratorMIPS::VisitSub(HSub* instruction) {
7832 HandleBinaryOp(instruction);
7833 }
7834
7835 void LocationsBuilderMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
7836 HandleFieldGet(instruction, instruction->GetFieldInfo());
7837 }
7838
7839 void InstructionCodeGeneratorMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
7840 HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
7841 }
7842
7843 void LocationsBuilderMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
7844 HandleFieldSet(instruction, instruction->GetFieldInfo());
7845 }
7846
7847 void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
7848 HandleFieldSet(instruction,
7849 instruction->GetFieldInfo(),
7850 instruction->GetDexPc(),
7851 instruction->GetValueCanBeNull());
7852 }
7853
7854 void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
7855 HUnresolvedInstanceFieldGet* instruction) {
7856 FieldAccessCallingConventionMIPS calling_convention;
7857 codegen_->CreateUnresolvedFieldLocationSummary(instruction,
7858 instruction->GetFieldType(),
7859 calling_convention);
7860 }
7861
7862 void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldGet(
7863 HUnresolvedInstanceFieldGet* instruction) {
7864 FieldAccessCallingConventionMIPS calling_convention;
7865 codegen_->GenerateUnresolvedFieldAccess(instruction,
7866 instruction->GetFieldType(),
7867 instruction->GetFieldIndex(),
7868 instruction->GetDexPc(),
7869 calling_convention);
7870 }
7871
7872 void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldSet(
7873 HUnresolvedInstanceFieldSet* instruction) {
7874 FieldAccessCallingConventionMIPS calling_convention;
7875 codegen_->CreateUnresolvedFieldLocationSummary(instruction,
7876 instruction->GetFieldType(),
7877 calling_convention);
7878 }
7879
7880 void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldSet(
7881 HUnresolvedInstanceFieldSet* instruction) {
7882 FieldAccessCallingConventionMIPS calling_convention;
7883 codegen_->GenerateUnresolvedFieldAccess(instruction,
7884 instruction->GetFieldType(),
7885 instruction->GetFieldIndex(),
7886 instruction->GetDexPc(),
7887 calling_convention);
7888 }
7889
7890 void LocationsBuilderMIPS::VisitUnresolvedStaticFieldGet(
7891 HUnresolvedStaticFieldGet* instruction) {
7892 FieldAccessCallingConventionMIPS calling_convention;
7893 codegen_->CreateUnresolvedFieldLocationSummary(instruction,
7894 instruction->GetFieldType(),
7895 calling_convention);
7896 }
7897
7898 void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldGet(
7899 HUnresolvedStaticFieldGet* instruction) {
7900 FieldAccessCallingConventionMIPS calling_convention;
7901 codegen_->GenerateUnresolvedFieldAccess(instruction,
7902 instruction->GetFieldType(),
7903 instruction->GetFieldIndex(),
7904 instruction->GetDexPc(),
7905 calling_convention);
7906 }
7907
7908 void LocationsBuilderMIPS::VisitUnresolvedStaticFieldSet(
7909 HUnresolvedStaticFieldSet* instruction) {
7910 FieldAccessCallingConventionMIPS calling_convention;
7911 codegen_->CreateUnresolvedFieldLocationSummary(instruction,
7912 instruction->GetFieldType(),
7913 calling_convention);
7914 }
7915
7916 void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
7917 HUnresolvedStaticFieldSet* instruction) {
7918 FieldAccessCallingConventionMIPS calling_convention;
7919 codegen_->GenerateUnresolvedFieldAccess(instruction,
7920 instruction->GetFieldType(),
7921 instruction->GetFieldIndex(),
7922 instruction->GetDexPc(),
7923 calling_convention);
7924 }
7925
7926 void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
7927 LocationSummary* locations =
7928 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
7929 locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
7930 }
7931
7932 void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
7933 HBasicBlock* block = instruction->GetBlock();
7934 if (block->GetLoopInformation() != nullptr) {
7935 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
7936 // The back edge will generate the suspend check.
7937 return;
7938 }
7939 if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
7940 // The goto will generate the suspend check.
7941 return;
7942 }
7943 GenerateSuspendCheck(instruction, nullptr);
7944 }
7945
7946 void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
7947 LocationSummary* locations =
7948 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
7949 InvokeRuntimeCallingConvention calling_convention;
7950 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
7951 }
7952
7953 void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) {
7954 codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
7955 CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
7956 }
7957
7958 void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
7959 Primitive::Type input_type = conversion->GetInputType();
7960 Primitive::Type result_type = conversion->GetResultType();
7961 DCHECK_NE(input_type, result_type);
7962 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
7963
7964 if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
7965 (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
7966 LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
7967 }
7968
7969 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
7970 if (!isR6 &&
7971 ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
7972 (result_type == Primitive::kPrimLong && Primitive::IsFloatingPointType(input_type)))) {
7973 call_kind = LocationSummary::kCallOnMainOnly;
7974 }
7975
7976 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
7977
7978 if (call_kind == LocationSummary::kNoCall) {
7979 if (Primitive::IsFloatingPointType(input_type)) {
7980 locations->SetInAt(0, Location::RequiresFpuRegister());
7981 } else {
7982 locations->SetInAt(0, Location::RequiresRegister());
7983 }
7984
7985 if (Primitive::IsFloatingPointType(result_type)) {
7986 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
7987 } else {
7988 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
7989 }
7990 } else {
7991 InvokeRuntimeCallingConvention calling_convention;
7992
7993 if (Primitive::IsFloatingPointType(input_type)) {
7994 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
7995 } else {
7996 DCHECK_EQ(input_type, Primitive::kPrimLong);
7997 locations->SetInAt(0, Location::RegisterPairLocation(
7998 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
7999 }
8000
8001 locations->SetOut(calling_convention.GetReturnLocation(result_type));
8002 }
8003 }
8004
8005 void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversion) {
8006 LocationSummary* locations = conversion->GetLocations();
8007 Primitive::Type result_type = conversion->GetResultType();
8008 Primitive::Type input_type = conversion->GetInputType();
8009 bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
8010 bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
8011
8012 DCHECK_NE(input_type, result_type);
8013
8014 if (result_type == Primitive::kPrimLong && Primitive::IsIntegralType(input_type)) {
8015 Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
8016 Register dst_low = locations->Out().AsRegisterPairLow<Register>();
8017 Register src = locations->InAt(0).AsRegister<Register>();
8018
8019 if (dst_low != src) {
8020 __ Move(dst_low, src);
8021 }
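// Sign-extend into the high word: an arithmetic right shift by 31 replicates the sign bit.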
8022 __ Sra(dst_high, src, 31);
8023 } else if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
8024 Register dst = locations->Out().AsRegister<Register>();
8025 Register src = (input_type == Primitive::kPrimLong)
8026 ? locations->InAt(0).AsRegisterPairLow<Register>()
8027 : locations->InAt(0).AsRegister<Register>();
8028
8029 switch (result_type) {
8030 case Primitive::kPrimChar:
8031 __ Andi(dst, src, 0xFFFF);
8032 break;
8033 case Primitive::kPrimByte:
8034 if (has_sign_extension) {
8035 __ Seb(dst, src);
8036 } else {
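// Without seb, sign-extend by shifting the byte to the top of the register and
// arithmetically shifting it back down.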
8037 __ Sll(dst, src, 24);
8038 __ Sra(dst, dst, 24);
8039 }
8040 break;
8041 case Primitive::kPrimShort:
8042 if (has_sign_extension) {
8043 __ Seh(dst, src);
8044 } else {
8045 __ Sll(dst, src, 16);
8046 __ Sra(dst, dst, 16);
8047 }
8048 break;
8049 case Primitive::kPrimInt:
8050 if (dst != src) {
8051 __ Move(dst, src);
8052 }
8053 break;
8054
8055 default:
8056 LOG(FATAL) << "Unexpected type conversion from " << input_type
8057 << " to " << result_type;
8058 }
8059 } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
8060 if (input_type == Primitive::kPrimLong) {
8061 if (isR6) {
8062 // cvt.s.l/cvt.d.l requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
8063 // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
8064 Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
8065 Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
8066 FRegister dst = locations->Out().AsFpuRegister<FRegister>();
8067 __ Mtc1(src_low, FTMP);
8068 __ Mthc1(src_high, FTMP);
8069 if (result_type == Primitive::kPrimFloat) {
8070 __ Cvtsl(dst, FTMP);
8071 } else {
8072 __ Cvtdl(dst, FTMP);
8073 }
8074 } else {
8075 QuickEntrypointEnum entrypoint = (result_type == Primitive::kPrimFloat) ? kQuickL2f
8076 : kQuickL2d;
8077 codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
8078 if (result_type == Primitive::kPrimFloat) {
8079 CheckEntrypointTypes<kQuickL2f, float, int64_t>();
8080 } else {
8081 CheckEntrypointTypes<kQuickL2d, double, int64_t>();
8082 }
8083 }
8084 } else {
8085 Register src = locations->InAt(0).AsRegister<Register>();
8086 FRegister dst = locations->Out().AsFpuRegister<FRegister>();
8087 __ Mtc1(src, FTMP);
8088 if (result_type == Primitive::kPrimFloat) {
8089 __ Cvtsw(dst, FTMP);
8090 } else {
8091 __ Cvtdw(dst, FTMP);
8092 }
8093 }
8094 } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
8095 CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
8096 if (result_type == Primitive::kPrimLong) {
8097 if (isR6) {
8098 // trunc.l.s/trunc.l.d requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
8099 // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
8100 FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
8101 Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
8102 Register dst_low = locations->Out().AsRegisterPairLow<Register>();
8103 MipsLabel truncate;
8104 MipsLabel done;
8105
8106 // When NAN2008=0 (R2 and before), the truncate instruction produces the maximum positive
8107 // value when the input is either a NaN or is outside of the range of the output type
8108 // after the truncation. IOW, the three special cases (NaN, too small, too big) produce
8109 // the same result.
8110 //
8111 // When NAN2008=1 (R6), the truncate instruction caps the output at the minimum/maximum
8112 // value of the output type if the input is outside of the range after the truncation or
8113 // produces 0 when the input is a NaN. IOW, the three special cases produce three distinct
8114 // results. This matches the desired float/double-to-int/long conversion exactly.
8115 //
8116 // So, NAN2008 affects handling of negative values and NaNs by the truncate instruction.
8117 //
8118 // The following code supports both NAN2008=0 and NAN2008=1 behaviors of the truncate
8119 // instruction, the reason being that the emulator implements NAN2008=0 on MIPS64R6,
8120 // even though it must be NAN2008=1 on R6.
8121 //
8122 // The code takes care of the different behaviors by first comparing the input to the
8123         // minimum output value (-2**63 for truncating to long, -2**31 for truncating to int).
8124         // If the input is greater than or equal to the minimum, it proceeds to the truncate
8125 // instruction, which will handle such an input the same way irrespective of NAN2008.
8126 // Otherwise the input is compared to itself to determine whether it is a NaN or not
8127 // in order to return either zero or the minimum value.
8128 //
8129 // TODO: simplify this when the emulator correctly implements NAN2008=1 behavior of the
8130 // truncate instruction for MIPS64R6.
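// In short, for this float/double-to-long path: inputs >= -2**63 fall through to the
// truncate instruction; otherwise the (input == input) comparison below yields all ones
// for ordered (too small) inputs and zero for NaN, and AND-ing that mask into the high
// word leaves INT64_MIN for too-small inputs and 0 for NaN.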
8131 if (input_type == Primitive::kPrimFloat) {
8132 uint32_t min_val = bit_cast<uint32_t, float>(std::numeric_limits<int64_t>::min());
8133 __ LoadConst32(TMP, min_val);
8134 __ Mtc1(TMP, FTMP);
8135 __ CmpLeS(FTMP, FTMP, src);
8136 } else {
8137 uint64_t min_val = bit_cast<uint64_t, double>(std::numeric_limits<int64_t>::min());
8138 __ LoadConst32(TMP, High32Bits(min_val));
8139 __ Mtc1(ZERO, FTMP);
8140 __ Mthc1(TMP, FTMP);
8141 __ CmpLeD(FTMP, FTMP, src);
8142 }
8143
8144 __ Bc1nez(FTMP, &truncate);
8145
8146 if (input_type == Primitive::kPrimFloat) {
8147 __ CmpEqS(FTMP, src, src);
8148 } else {
8149 __ CmpEqD(FTMP, src, src);
8150 }
8151 __ Move(dst_low, ZERO);
8152 __ LoadConst32(dst_high, std::numeric_limits<int32_t>::min());
8153 __ Mfc1(TMP, FTMP);
8154 __ And(dst_high, dst_high, TMP);
8155
8156 __ B(&done);
8157
8158 __ Bind(&truncate);
8159
8160 if (input_type == Primitive::kPrimFloat) {
8161 __ TruncLS(FTMP, src);
8162 } else {
8163 __ TruncLD(FTMP, src);
8164 }
8165 __ Mfc1(dst_low, FTMP);
8166 __ Mfhc1(dst_high, FTMP);
8167
8168 __ Bind(&done);
8169 } else {
8170 QuickEntrypointEnum entrypoint = (input_type == Primitive::kPrimFloat) ? kQuickF2l
8171 : kQuickD2l;
8172 codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
8173 if (input_type == Primitive::kPrimFloat) {
8174 CheckEntrypointTypes<kQuickF2l, int64_t, float>();
8175 } else {
8176 CheckEntrypointTypes<kQuickD2l, int64_t, double>();
8177 }
8178 }
8179 } else {
8180 FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
8181 Register dst = locations->Out().AsRegister<Register>();
8182 MipsLabel truncate;
8183 MipsLabel done;
8184
8185 // The following code supports both NAN2008=0 and NAN2008=1 behaviors of the truncate
8186 // instruction, the reason being that the emulator implements NAN2008=0 on MIPS64R6,
8187 // even though it must be NAN2008=1 on R6.
8188 //
8189 // For details see the large comment above for the truncation of float/double to long on R6.
8190 //
8191 // TODO: simplify this when the emulator correctly implements NAN2008=1 behavior of the
8192 // truncate instruction for MIPS64R6.
8193 if (input_type == Primitive::kPrimFloat) {
8194 uint32_t min_val = bit_cast<uint32_t, float>(std::numeric_limits<int32_t>::min());
8195 __ LoadConst32(TMP, min_val);
8196 __ Mtc1(TMP, FTMP);
8197 } else {
8198 uint64_t min_val = bit_cast<uint64_t, double>(std::numeric_limits<int32_t>::min());
8199 __ LoadConst32(TMP, High32Bits(min_val));
8200 __ Mtc1(ZERO, FTMP);
8201 __ MoveToFpuHigh(TMP, FTMP);
8202 }
8203
8204 if (isR6) {
8205 if (input_type == Primitive::kPrimFloat) {
8206 __ CmpLeS(FTMP, FTMP, src);
8207 } else {
8208 __ CmpLeD(FTMP, FTMP, src);
8209 }
8210 __ Bc1nez(FTMP, &truncate);
8211
8212 if (input_type == Primitive::kPrimFloat) {
8213 __ CmpEqS(FTMP, src, src);
8214 } else {
8215 __ CmpEqD(FTMP, src, src);
8216 }
8217 __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
8218 __ Mfc1(TMP, FTMP);
8219 __ And(dst, dst, TMP);
8220 } else {
8221 if (input_type == Primitive::kPrimFloat) {
8222 __ ColeS(0, FTMP, src);
8223 } else {
8224 __ ColeD(0, FTMP, src);
8225 }
8226 __ Bc1t(0, &truncate);
8227
8228 if (input_type == Primitive::kPrimFloat) {
8229 __ CeqS(0, src, src);
8230 } else {
8231 __ CeqD(0, src, src);
8232 }
8233 __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
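// movf: if FP condition flag 0 is false (i.e. src != src, a NaN), replace INT32_MIN with zero.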
8234 __ Movf(dst, ZERO, 0);
8235 }
8236
8237 __ B(&done);
8238
8239 __ Bind(&truncate);
8240
8241 if (input_type == Primitive::kPrimFloat) {
8242 __ TruncWS(FTMP, src);
8243 } else {
8244 __ TruncWD(FTMP, src);
8245 }
8246 __ Mfc1(dst, FTMP);
8247
8248 __ Bind(&done);
8249 }
8250 } else if (Primitive::IsFloatingPointType(result_type) &&
8251 Primitive::IsFloatingPointType(input_type)) {
8252 FRegister dst = locations->Out().AsFpuRegister<FRegister>();
8253 FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
8254 if (result_type == Primitive::kPrimFloat) {
8255 __ Cvtsd(dst, src);
8256 } else {
8257 __ Cvtds(dst, src);
8258 }
8259 } else {
8260 LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
8261 << " to " << result_type;
8262 }
8263 }
8264
8265 void LocationsBuilderMIPS::VisitUShr(HUShr* ushr) {
8266 HandleShift(ushr);
8267 }
8268
8269 void InstructionCodeGeneratorMIPS::VisitUShr(HUShr* ushr) {
8270 HandleShift(ushr);
8271 }
8272
8273 void LocationsBuilderMIPS::VisitXor(HXor* instruction) {
8274 HandleBinaryOp(instruction);
8275 }
8276
8277 void InstructionCodeGeneratorMIPS::VisitXor(HXor* instruction) {
8278 HandleBinaryOp(instruction);
8279 }
8280
8281 void LocationsBuilderMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
8282 // Nothing to do, this should be removed during prepare for register allocator.
8283 LOG(FATAL) << "Unreachable";
8284 }
8285
8286 void InstructionCodeGeneratorMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
8287 // Nothing to do, this should be removed during prepare for register allocator.
8288 LOG(FATAL) << "Unreachable";
8289 }
8290
8291 void LocationsBuilderMIPS::VisitEqual(HEqual* comp) {
8292 HandleCondition(comp);
8293 }
8294
8295 void InstructionCodeGeneratorMIPS::VisitEqual(HEqual* comp) {
8296 HandleCondition(comp);
8297 }
8298
8299 void LocationsBuilderMIPS::VisitNotEqual(HNotEqual* comp) {
8300 HandleCondition(comp);
8301 }
8302
8303 void InstructionCodeGeneratorMIPS::VisitNotEqual(HNotEqual* comp) {
8304 HandleCondition(comp);
8305 }
8306
8307 void LocationsBuilderMIPS::VisitLessThan(HLessThan* comp) {
8308 HandleCondition(comp);
8309 }
8310
8311 void InstructionCodeGeneratorMIPS::VisitLessThan(HLessThan* comp) {
8312 HandleCondition(comp);
8313 }
8314
8315 void LocationsBuilderMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
8316 HandleCondition(comp);
8317 }
8318
8319 void InstructionCodeGeneratorMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
8320 HandleCondition(comp);
8321 }
8322
8323 void LocationsBuilderMIPS::VisitGreaterThan(HGreaterThan* comp) {
8324 HandleCondition(comp);
8325 }
8326
8327 void InstructionCodeGeneratorMIPS::VisitGreaterThan(HGreaterThan* comp) {
8328 HandleCondition(comp);
8329 }
8330
8331 void LocationsBuilderMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
8332 HandleCondition(comp);
8333 }
8334
8335 void InstructionCodeGeneratorMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
8336 HandleCondition(comp);
8337 }
8338
8339 void LocationsBuilderMIPS::VisitBelow(HBelow* comp) {
8340 HandleCondition(comp);
8341 }
8342
8343 void InstructionCodeGeneratorMIPS::VisitBelow(HBelow* comp) {
8344 HandleCondition(comp);
8345 }
8346
8347 void LocationsBuilderMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
8348 HandleCondition(comp);
8349 }
8350
8351 void InstructionCodeGeneratorMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
8352 HandleCondition(comp);
8353 }
8354
8355 void LocationsBuilderMIPS::VisitAbove(HAbove* comp) {
8356 HandleCondition(comp);
8357 }
8358
8359 void InstructionCodeGeneratorMIPS::VisitAbove(HAbove* comp) {
8360 HandleCondition(comp);
8361 }
8362
8363 void LocationsBuilderMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
8364 HandleCondition(comp);
8365 }
8366
8367 void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
8368 HandleCondition(comp);
8369 }
8370
8371 void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
8372 LocationSummary* locations =
8373 new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
8374 locations->SetInAt(0, Location::RequiresRegister());
8375 }
8376
8377 void InstructionCodeGeneratorMIPS::GenPackedSwitchWithCompares(Register value_reg,
8378 int32_t lower_bound,
8379 uint32_t num_entries,
8380 HBasicBlock* switch_block,
8381 HBasicBlock* default_block) {
8382 // Create a set of compare/jumps.
8383 Register temp_reg = TMP;
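// For illustration, with lower_bound = 10 and num_entries = 4 this emits roughly:
//   addiu temp, value, -10
//   bltz  temp, default      // value < 10
//   beqz  temp, case_10
//   addiu temp, temp, -2
//   bltz  temp, case_11      // original temp was 1
//   beqz  temp, case_12      // original temp was 2
//   addiu temp, temp, -1
//   beqz  temp, case_13      // original temp was 3
//   b     default            // unless the default block is the fallthrough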
8384 __ Addiu32(temp_reg, value_reg, -lower_bound);
8385   // Jump to the default block if the index is negative.
8386   // Note: We don't check the case where the index is positive while value < lower_bound, because
8387   // in that case index >= num_entries must be true; this saves one branch instruction.
8388 __ Bltz(temp_reg, codegen_->GetLabelOf(default_block));
8389
8390 const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
8391 // Jump to successors[0] if value == lower_bound.
8392 __ Beqz(temp_reg, codegen_->GetLabelOf(successors[0]));
8393 int32_t last_index = 0;
8394 for (; num_entries - last_index > 2; last_index += 2) {
8395 __ Addiu(temp_reg, temp_reg, -2);
8396 // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
8397 __ Bltz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
8398 // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
8399 __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
8400 }
8401 if (num_entries - last_index == 2) {
8402 // The last missing case_value.
8403 __ Addiu(temp_reg, temp_reg, -1);
8404 __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
8405 }
8406
8407 // And the default for any other value.
8408 if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
8409 __ B(codegen_->GetLabelOf(default_block));
8410 }
8411 }
8412
8413 void InstructionCodeGeneratorMIPS::GenTableBasedPackedSwitch(Register value_reg,
8414 Register constant_area,
8415 int32_t lower_bound,
8416 uint32_t num_entries,
8417 HBasicBlock* switch_block,
8418 HBasicBlock* default_block) {
8419 // Create a jump table.
8420 std::vector<MipsLabel*> labels(num_entries);
8421 const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
8422 for (uint32_t i = 0; i < num_entries; i++) {
8423 labels[i] = codegen_->GetLabelOf(successors[i]);
8424 }
8425 JumpTable* table = __ CreateJumpTable(std::move(labels));
8426
8427 // Is the value in range?
8428 __ Addiu32(TMP, value_reg, -lower_bound);
8429 if (IsInt<16>(static_cast<int32_t>(num_entries))) {
8430 __ Sltiu(AT, TMP, num_entries);
8431 __ Beqz(AT, codegen_->GetLabelOf(default_block));
8432 } else {
8433 __ LoadConst32(AT, num_entries);
8434 __ Bgeu(TMP, AT, codegen_->GetLabelOf(default_block));
8435 }
8436
8437 // We are in the range of the table.
8438 // Load the target address from the jump table, indexing by the value.
8439 __ LoadLabelAddress(AT, constant_area, table->GetLabel());
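// Compute the address of the 4-byte jump table entry: TMP = AT + (TMP << 2).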
8440 __ ShiftAndAdd(TMP, TMP, AT, 2, TMP);
8441 __ Lw(TMP, TMP, 0);
8442 // Compute the absolute target address by adding the table start address
8443 // (the table contains offsets to targets relative to its start).
8444 __ Addu(TMP, TMP, AT);
8445 // And jump.
8446 __ Jr(TMP);
8447 __ NopIfNoReordering();
8448 }
8449
8450 void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
8451 int32_t lower_bound = switch_instr->GetStartValue();
8452 uint32_t num_entries = switch_instr->GetNumEntries();
8453 LocationSummary* locations = switch_instr->GetLocations();
8454 Register value_reg = locations->InAt(0).AsRegister<Register>();
8455 HBasicBlock* switch_block = switch_instr->GetBlock();
8456 HBasicBlock* default_block = switch_instr->GetDefaultBlock();
8457
8458 if (codegen_->GetInstructionSetFeatures().IsR6() &&
8459 num_entries > kPackedSwitchJumpTableThreshold) {
8460 // R6 uses PC-relative addressing to access the jump table.
8461 // R2, OTOH, requires an HMipsComputeBaseMethodAddress input to access
8462 // the jump table and it is implemented by changing HPackedSwitch to
8463 // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress.
8464 // See VisitMipsPackedSwitch() for the table-based implementation on R2.
8465 GenTableBasedPackedSwitch(value_reg,
8466 ZERO,
8467 lower_bound,
8468 num_entries,
8469 switch_block,
8470 default_block);
8471 } else {
8472 GenPackedSwitchWithCompares(value_reg,
8473 lower_bound,
8474 num_entries,
8475 switch_block,
8476 default_block);
8477 }
8478 }
8479
8480 void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
8481 LocationSummary* locations =
8482 new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
8483 locations->SetInAt(0, Location::RequiresRegister());
8484 // Constant area pointer (HMipsComputeBaseMethodAddress).
8485 locations->SetInAt(1, Location::RequiresRegister());
8486 }
8487
8488 void InstructionCodeGeneratorMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
8489 int32_t lower_bound = switch_instr->GetStartValue();
8490 uint32_t num_entries = switch_instr->GetNumEntries();
8491 LocationSummary* locations = switch_instr->GetLocations();
8492 Register value_reg = locations->InAt(0).AsRegister<Register>();
8493 Register constant_area = locations->InAt(1).AsRegister<Register>();
8494 HBasicBlock* switch_block = switch_instr->GetBlock();
8495 HBasicBlock* default_block = switch_instr->GetDefaultBlock();
8496
8497 // This is an R2-only path. HPackedSwitch has been changed to
8498 // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress
8499 // required to address the jump table relative to PC.
8500 GenTableBasedPackedSwitch(value_reg,
8501 constant_area,
8502 lower_bound,
8503 num_entries,
8504 switch_block,
8505 default_block);
8506 }
8507
8508 void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress(
8509 HMipsComputeBaseMethodAddress* insn) {
8510 LocationSummary* locations =
8511 new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
8512 locations->SetOut(Location::RequiresRegister());
8513 }
8514
8515 void InstructionCodeGeneratorMIPS::VisitMipsComputeBaseMethodAddress(
8516 HMipsComputeBaseMethodAddress* insn) {
8517 LocationSummary* locations = insn->GetLocations();
8518 Register reg = locations->Out().AsRegister<Register>();
8519
8520 CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
8521
8522 // Generate a dummy PC-relative call to obtain PC.
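// NAL ("branch-and-link never") never branches but still writes the address of the
// instruction following its delay slot into RA, which is how the current PC is obtained here.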
8523 __ Nal();
8524 // Grab the return address off RA.
8525 __ Move(reg, RA);
8526 // TODO: Can we share this code with that of VisitMipsDexCacheArraysBase()?
8527
8528 // Remember this offset (the obtained PC value) for later use with constant area.
8529 __ BindPcRelBaseLabel();
8530 }
8531
8532 void LocationsBuilderMIPS::VisitMipsDexCacheArraysBase(HMipsDexCacheArraysBase* base) {
8533 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(base);
8534 locations->SetOut(Location::RequiresRegister());
8535 }
8536
8537 void InstructionCodeGeneratorMIPS::VisitMipsDexCacheArraysBase(HMipsDexCacheArraysBase* base) {
8538 Register reg = base->GetLocations()->Out().AsRegister<Register>();
8539 CodeGeneratorMIPS::PcRelativePatchInfo* info =
8540 codegen_->NewPcRelativeDexCacheArrayPatch(base->GetDexFile(), base->GetElementOffset());
8541 CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
8542 bool reordering = __ SetReorder(false);
8543 // TODO: Reuse MipsComputeBaseMethodAddress on R2 instead of passing ZERO to force emitting NAL.
8544 codegen_->EmitPcRelativeAddressPlaceholderHigh(info, reg, ZERO);
8545 __ Addiu(reg, reg, /* placeholder */ 0x5678);
8546 __ SetReorder(reordering);
8547 }
8548
8549 void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
8550   // The trampoline uses the same calling convention as the dex calling convention,
8551   // except that instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
8552   // the method_idx.
8553 HandleInvoke(invoke);
8554 }
8555
8556 void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
8557 codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
8558 }
8559
8560 void LocationsBuilderMIPS::VisitClassTableGet(HClassTableGet* instruction) {
8561 LocationSummary* locations =
8562 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
8563 locations->SetInAt(0, Location::RequiresRegister());
8564 locations->SetOut(Location::RequiresRegister());
8565 }
8566
8567 void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instruction) {
8568 LocationSummary* locations = instruction->GetLocations();
8569 if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
8570 uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
8571 instruction->GetIndex(), kMipsPointerSize).SizeValue();
8572 __ LoadFromOffset(kLoadWord,
8573 locations->Out().AsRegister<Register>(),
8574 locations->InAt(0).AsRegister<Register>(),
8575 method_offset);
8576 } else {
8577 uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
8578 instruction->GetIndex(), kMipsPointerSize));
8579 __ LoadFromOffset(kLoadWord,
8580 locations->Out().AsRegister<Register>(),
8581 locations->InAt(0).AsRegister<Register>(),
8582 mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
8583 __ LoadFromOffset(kLoadWord,
8584 locations->Out().AsRegister<Register>(),
8585 locations->Out().AsRegister<Register>(),
8586 method_offset);
8587 }
8588 }
8589
8590 #undef __
8591 #undef QUICK_ENTRY_POINT
8592
8593 } // namespace mips
8594 } // namespace art
8595