// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/mips64/macro-assembler-mips64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble


// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg)                                                      \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL()                                            \
  PrintF("UNIMPLEMENTED code_generator_mips64: %s at line %d\n",  \
         __FUNCTION__, __LINE__)


// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // The Single (Float) and Double register namespaces are the same on
    // MIPS; both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputDoubleRegister(index);
  }

  DoubleRegister InputOrZeroSingleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputSingleRegister(index);
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //    maybe not done on arm due to const pool ??
        break;
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
    return Operand(zero_reg);
  }

  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }
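
  // Note: kMode_MRI above is the standard MIPS base-register-plus-immediate
  // addressing form (the address in, e.g., `ld t0, 16(a1)`); kMode_MRR
  // (base plus index register, r6) is not implemented yet.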

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        AllocatedOperand::cast(op)->index());
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {

class OutOfLineLoadSingle final : public OutOfLineCode {
 public:
  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  FloatRegister const result_;
};


class OutOfLineLoadDouble final : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ mov(result_, zero_reg); }

 private:
  Register const result_;
};


class OutOfLineRound : public OutOfLineCode {
 public:
  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of double input already in kScratchReg.
    __ dsrl(at, kScratchReg, 31);
    __ dsll(at, at, 31);
    __ mthc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineRound32 : public OutOfLineCode {
 public:
  OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of float input already in kScratchReg.
    __ srl(at, kScratchReg, 31);
    __ sll(at, at, 31);
    __ mtc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};

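// Out-of-line portion of the write barrier emitted for
// kArchStoreWithWriteBarrier: smi values and values on uninteresting pages
// are filtered out below, and the RecordWriteStub is called with the address
// of the updated slot (object + index) in scratch1.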
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    if (mode_ > RecordWriteMode::kValueIsMap) {
      __ CheckPageFlag(value_, scratch0_,
                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
                       exit());
    }
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    // TODO(turbofan): Once we get frame elision working, we need to save
    // and restore lr properly here if the frame was elided.
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         EMIT_REMEMBERED_SET, save_fp_mode);
    __ Daddu(scratch1_, object_, index_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};


Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


Condition FlagsConditionToConditionTst(FlagsCondition condition) {
  switch (condition) {
    case kNotEqual:
      return ne;
    case kEqual:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
  switch (condition) {
    case kOverflow:
      return ne;
    case kNotOverflow:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

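// Maps a FlagsCondition to a MIPS FPU compare plus a predicate. When the
// returned predicate is false, the condition must be tested as the negated
// compare result; the negated conditions use the unordered complement, e.g.
// kUnsignedGreaterThanOrEqual is tested as NOT(a ULT b), which is false
// whenever an operand is NaN.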
FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
                                             FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      predicate = true;
      return EQ;
    case kNotEqual:
      predicate = false;
      return EQ;
    case kUnsignedLessThan:
      predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      predicate = false;
      return ULT;
    case kUnsignedLessThanOrEqual:
      predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      predicate = false;
      return ULE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      predicate = true;
      break;
    default:
      predicate = true;
      break;
  }
  UNREACHABLE();
  return kNoFPUCondition;
}

}  // namespace

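// The checked memory access macros below implement bounds-checked loads and
// stores: the unsigned offset is compared against the length operand, and an
// out-of-range access branches to the out-of-line path (NaN for float loads,
// zero for integer loads) or skips the store entirely. In the register form,
// 'hs' (unsigned >=) on offset vs. length detects the out-of-range case; in
// the immediate form the operands are reversed, so 'ls' (unsigned <=) on
// length vs. offset does.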
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
  do {                                                                        \
    auto result = i.Output##width##Register();                                \
    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ Daddu(kScratchReg, i.InputRegister(2), offset);                      \
      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
    } else {                                                                  \
      int offset = static_cast<int>(i.InputOperand(0).immediate());           \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
  do {                                                                        \
    auto result = i.OutputRegister();                                         \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ Daddu(kScratchReg, i.InputRegister(2), offset);                      \
      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
    } else {                                                                  \
      int offset = static_cast<int>(i.InputOperand(0).immediate());           \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ Daddu(kScratchReg, i.InputRegister(3), offset);               \
      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
    } else {                                                           \
      int offset = static_cast<int>(i.InputOperand(0).immediate());    \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.InputRegister(2);                                 \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ Daddu(kScratchReg, i.InputRegister(3), offset);               \
      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
    } else {                                                           \
      int offset = static_cast<int>(i.InputOperand(0).immediate());    \
      auto value = i.InputRegister(2);                                 \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)


#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode)                                  \
  if (kArchVariant == kMips64r6) {                                             \
    __ cfc1(kScratchReg, FCSR);                                                \
    __ li(at, Operand(mode_##mode));                                           \
    __ ctc1(at, FCSR);                                                         \
    __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    __ ctc1(kScratchReg, FCSR);                                                \
  } else {                                                                     \
    auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister());    \
    Label done;                                                                \
    __ mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
           HeapNumber::kExponentBits);                                         \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
    __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));         \
    __ dmfc1(at, i.OutputDoubleRegister());                                    \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
    __ bind(ool->exit());                                                      \
    __ bind(&done);                                                            \
  }

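// In the pre-r6 paths above and below: if the biased exponent shows the
// magnitude is at least 2^kMantissaBits, the value is already integral (or
// NaN/Infinity) and is passed through unchanged. Otherwise the value is
// rounded to a 64-bit (32-bit) integer and converted back; a zero integer
// result takes the OutOfLineRound path, which re-applies the sign bit so
// that, e.g., trunc(-0.5) yields -0.0 rather than +0.0.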
#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode)                                   \
  if (kArchVariant == kMips64r6) {                                            \
    __ cfc1(kScratchReg, FCSR);                                               \
    __ li(at, Operand(mode_##mode));                                          \
    __ ctc1(at, FCSR);                                                        \
    __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));            \
    __ ctc1(kScratchReg, FCSR);                                               \
  } else {                                                                    \
    int32_t kFloat32ExponentBias = 127;                                       \
    int32_t kFloat32MantissaBits = 23;                                        \
    int32_t kFloat32ExponentBits = 8;                                         \
    auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
    Label done;                                                               \
    __ mfc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits);      \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                  \
              Operand(kFloat32ExponentBias + kFloat32MantissaBits));          \
    __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));        \
    __ mfc1(at, i.OutputDoubleRegister());                                    \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));       \
    __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister());           \
    __ bind(ool->exit());                                                     \
    __ bind(&done);                                                           \
  }

void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ daddiu(sp, sp, sp_slot_delta * kPointerSize);
  }
  frame_access_state()->SetFrameAccessToDefault();
}


void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame()->needs_frame()) {
    __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();

  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Call(at);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Jump(at);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }
      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(at);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(at);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchLazyBailout: {
      EnsureSpaceForLazyDeopt();
      RecordCallPosition(instr);
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
                                                   scratch0, scratch1, mode);
      __ Daddu(at, object, index);
      __ sd(value, MemOperand(at));
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Add:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dadd:
      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DaddOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMips64Sub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dsub:
      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DsubOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMips64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DMulHigh:
      __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
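    // In the div/mod family below, the quotient is forced to zero when the
    // divisor is zero: on r6, selnez keeps the result only for a nonzero
    // divisor (the output register is expected to alias the first input
    // here), and on pre-r6, Movz copies the (zero) divisor into the result
    // when it is zero.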
    case kMips64Div:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Mod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64ModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmul:
      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Ddiv:
      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DdivU:
      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Dmod:
      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DmodU:
      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Nor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK(i.InputOperand(1).immediate() == 0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Xor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Clz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Dclz:
      __ dclz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Shl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Shr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Sar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Ext:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMips64Ins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
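    // The MIPS64 bit-extract family splits by operand range: DEXT covers pos
    // and size within the low 32 bits, DEXTM covers sizes of 33-64 bits
    // (size - 32 is encoded), and DEXTU covers positions 32-63 (pos - 32 is
    // encoded).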
    case kMips64Dext: {
      int16_t pos = i.InputInt8(1);
      int16_t size = i.InputInt8(2);
      if (size > 0 && size <= 32 && pos >= 0 && pos < 32) {
        __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      } else if (size > 32 && size <= 64 && pos > 0 && pos < 32) {
        __ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                 i.InputInt8(2));
      } else {
        DCHECK(size > 0 && size <= 32 && pos >= 32 && pos < 64);
        __ Dextu(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                 i.InputInt8(2));
      }
      break;
    }
    case kMips64Dins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      }
      break;
    case kMips64Dshl:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsll(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsll32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dshr:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsrl(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsrl32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dsar:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Ror:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dror:
      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Tst:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Cmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Mov:
      // TODO(plind): Should we combine mov/li like this, or use separate instr?
      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

    case kMips64CmpS:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMips64AddS:
      // TODO(plind): add special case: combine mult & add.
      __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubS:
      __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulS:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivS:
      __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModS: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputSingleRegister());
      break;
    }
    case kMips64AbsS:
      __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64SqrtS: {
      __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxS:
      __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinS:
      __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64CmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMips64AddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMips64AbsD:
      __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64SqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxD:
      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinD:
      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64Float64RoundDown: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
      break;
    }
    case kMips64Float32RoundDown: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
      break;
    }
    case kMips64Float64RoundTruncate: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
      break;
    }
    case kMips64Float32RoundTruncate: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
      break;
    }
    case kMips64Float64RoundUp: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
      break;
    }
    case kMips64Float32RoundUp: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
      break;
    }
    case kMips64Float64RoundTiesEven: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
      break;
    }
    case kMips64Float32RoundTiesEven: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
      break;
    }
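    // The pseudo min/max ops below use ordered (OLT) compares, so when an
    // operand is NaN the comparison is false and one of the inputs is passed
    // through unchanged rather than producing a canonical NaN.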
    case kMips64Float64Max: {
      // (b < a) ? a : b
      if (kArchVariant == kMips64r6) {
        __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
        __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
      } else {
        __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        // Left operand is result, passthrough if false.
        __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    }
    case kMips64Float64Min: {
      // (a < b) ? a : b
      if (kArchVariant == kMips64r6) {
        __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 i.InputDoubleRegister(1));
        __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
      } else {
        __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
        // Right operand is result, passthrough if false.
        __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    }
    case kMips64Float32Max: {
      // (b < a) ? a : b
      if (kArchVariant == kMips64r6) {
        __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
        __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
      } else {
        __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        // Left operand is result, passthrough if false.
        __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    }
    case kMips64Float32Min: {
      // (a < b) ? a : b
      if (kArchVariant == kMips64r6) {
        __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 i.InputDoubleRegister(1));
        __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
      } else {
        __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
        // Right operand is result, passthrough if false.
        __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    }
    case kMips64CvtSD:
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtDS:
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64CvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_s_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_s_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_d_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDUw: {
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtDUl: {
      __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSUl: {
      __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64FloorWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64FloorWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ trunc_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
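    // For the 64-bit truncations below, a second output (when present)
    // receives a success bit: FCSR is cleared beforehand, and afterwards the
    // overflow and invalid-operation flags are tested; output 1 is set to 1
    // only when neither flag fired.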
    case kMips64TruncLS: {
      FPURegister scratch = kScratchDoubleReg;
      Register tmp_fcsr = kScratchReg;
      Register result = kScratchReg2;

      bool load_status = instr->OutputCount() > 1;
      if (load_status) {
        // Save FCSR.
        __ cfc1(tmp_fcsr, FCSR);
        // Clear FPU flags.
        __ ctc1(zero_reg, FCSR);
      }
      // Other arches use round to zero here, so we follow.
      __ trunc_l_s(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(), scratch);
      if (load_status) {
        __ cfc1(result, FCSR);
        // Check for overflow and NaNs.
        __ andi(result, result,
                (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
        __ Slt(result, zero_reg, result);
        __ xori(result, result, 1);
        __ mov(i.OutputRegister(1), result);
        // Restore FCSR.
        __ ctc1(tmp_fcsr, FCSR);
      }
      break;
    }
    case kMips64TruncLD: {
      FPURegister scratch = kScratchDoubleReg;
      Register tmp_fcsr = kScratchReg;
      Register result = kScratchReg2;

      bool load_status = instr->OutputCount() > 1;
      if (load_status) {
        // Save FCSR.
        __ cfc1(tmp_fcsr, FCSR);
        // Clear FPU flags.
        __ ctc1(zero_reg, FCSR);
      }
      // Other arches use round to zero here, so we follow.
      __ trunc_l_d(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(0), scratch);
      if (load_status) {
        __ cfc1(result, FCSR);
        // Check for overflow and NaNs.
        __ andi(result, result,
                (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
        __ Slt(result, zero_reg, result);
        __ xori(result, result, 1);
        __ mov(i.OutputRegister(1), result);
        // Restore FCSR.
        __ ctc1(tmp_fcsr, FCSR);
      }
      break;
    }
    case kMips64TruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncUlS: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      // TODO(plind): Fix wrong param order of Trunc_ul_s() macro-asm function.
      __ Trunc_ul_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch,
                    result);
      break;
    }
    case kMips64TruncUlD: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      // TODO(plind): Fix wrong param order of Trunc_ul_d() macro-asm function.
      __ Trunc_ul_d(i.InputDoubleRegister(0), i.OutputRegister(0), scratch,
                    result);
      break;
    }
    case kMips64BitcastDL:
      __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64BitcastLD:
      __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kMips64Float64ExtractLowWord32:
      __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64ExtractHighWord32:
      __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64InsertLowWord32:
      __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMips64Float64InsertHighWord32:
      __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    // ... more basic instructions ...

    case kMips64Lbu:
      __ lbu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lb:
      __ lb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sb:
      __ sb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Lhu:
      __ lhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lh:
      __ lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sh:
      __ sh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Lw:
      __ lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ld:
      __ ld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sw:
      __ sw(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Sd:
      __ sd(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Lwc1: {
      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    }
    case kMips64Swc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      __ swc1(i.InputSingleRegister(index), operand);
      break;
    }
    case kMips64Ldc1:
      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMips64Sdc1:
      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kMips64Push:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
        __ Dsubu(sp, sp, Operand(kDoubleSize));
1289         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1290       } else {
1291         __ Push(i.InputRegister(0));
1292         frame_access_state()->IncreaseSPDelta(1);
1293       }
1294       break;
1295     case kMips64StackClaim: {
1296       __ Dsubu(sp, sp, Operand(i.InputInt32(0)));
1297       frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
1298       break;
1299     }
1300     case kMips64StoreToStackSlot: {
1301       if (instr->InputAt(0)->IsDoubleRegister()) {
1302         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
1303       } else {
1304         __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
1305       }
1306       break;
1307     }
1308     case kCheckedLoadInt8:
1309       ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
1310       break;
1311     case kCheckedLoadUint8:
1312       ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
1313       break;
1314     case kCheckedLoadInt16:
1315       ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
1316       break;
1317     case kCheckedLoadUint16:
1318       ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
1319       break;
1320     case kCheckedLoadWord32:
1321       ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
1322       break;
1323     case kCheckedLoadWord64:
1324       ASSEMBLE_CHECKED_LOAD_INTEGER(ld);
1325       break;
1326     case kCheckedLoadFloat32:
1327       ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
1328       break;
1329     case kCheckedLoadFloat64:
1330       ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
1331       break;
1332     case kCheckedStoreWord8:
1333       ASSEMBLE_CHECKED_STORE_INTEGER(sb);
1334       break;
1335     case kCheckedStoreWord16:
1336       ASSEMBLE_CHECKED_STORE_INTEGER(sh);
1337       break;
1338     case kCheckedStoreWord32:
1339       ASSEMBLE_CHECKED_STORE_INTEGER(sw);
1340       break;
1341     case kCheckedStoreWord64:
1342       ASSEMBLE_CHECKED_STORE_INTEGER(sd);
1343       break;
1344     case kCheckedStoreFloat32:
1345       ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
1346       break;
1347     case kCheckedStoreFloat64:
1348       ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
1349       break;
1350   }
1351 }  // NOLINT(readability/fn_size)
1352 
1353 
1354 #define UNSUPPORTED_COND(opcode, condition)                                  \
1355   OFStream out(stdout);                                                      \
1356   out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
1357   UNIMPLEMENTED();
1358 
static bool convertCondition(FlagsCondition condition, Condition& cc) {
  switch (condition) {
    case kEqual:
      cc = eq;
      return true;
    case kNotEqual:
      cc = ne;
      return true;
    case kUnsignedLessThan:
      cc = lt;
      return true;
    case kUnsignedGreaterThanOrEqual:
      cc = uge;
      return true;
    case kUnsignedLessThanOrEqual:
      cc = le;
      return true;
    case kUnsignedGreaterThan:
      cc = ugt;
      return true;
    default:
      break;
  }
  return false;
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  MipsOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are handled here by
  // branch instructions that do the actual comparison. It is essential that
  // the input registers to the compare pseudo-op are not modified before this
  // branch op, as they are tested here.

  if (instr->arch_opcode() == kMips64Tst) {
    cc = FlagsConditionToConditionTst(branch->condition);
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(tlabel, cc, at, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
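    // The 32-bit add/sub was performed in 64 bits; it overflowed iff the
    // upper word (dsra32) differs from the sign extension of the lower word
    // (sra 31).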
    cc = FlagsConditionToConditionOvf(branch->condition);
    __ dsra32(kScratchReg, i.OutputRegister(), 0);
    __ sra(at, i.OutputRegister(), 31);
    __ Branch(tlabel, cc, at, Operand(kScratchReg));
  } else if (instr->arch_opcode() == kMips64DaddOvf) {
    switch (branch->condition) {
      case kOverflow:
        __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMips64DsubOvf) {
    switch (branch->condition) {
      case kOverflow:
        __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMips64Cmp) {
    cc = FlagsConditionToConditionCmp(branch->condition);
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
  } else if (instr->arch_opcode() == kMips64CmpS) {
    if (!convertCondition(branch->condition, cc)) {
      UNSUPPORTED_COND(kMips64CmpS, branch->condition);
    }
    FPURegister left = i.InputOrZeroSingleRegister(0);
    FPURegister right = i.InputOrZeroSingleRegister(1);
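    // Immediate operands are compared against kDoubleRegZero, which is lazily
    // loaded with +0.0 the first time it is needed.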
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF32(tlabel, nullptr, cc, left, right);
  } else if (instr->arch_opcode() == kMips64CmpD) {
    if (!convertCondition(branch->condition, cc)) {
      UNSUPPORTED_COND(kMips64CmpD, branch->condition);
    }
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF64(tlabel, nullptr, cc, left, right);
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
  if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label false_value;
  DCHECK_NE(0u, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are checked and handled
  // here.

  if (instr->arch_opcode() == kMips64Tst) {
    cc = FlagsConditionToConditionTst(condition);
    __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
    __ Sltu(result, zero_reg, kScratchReg);
    if (cc == eq) {
      // Sltu produces 0 for equality, invert the result.
      __ xori(result, result, 1);
    }
    return;
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    cc = FlagsConditionToConditionOvf(condition);
    // The overflow check materializes 1 or 0 directly into result.
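    // dsrl32 extracts bit 63 (the 64-bit sign) and srl extracts bit 31 (the
    // 32-bit sign); xor-ing them yields 1 exactly when the 32-bit operation
    // overflowed.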
    __ dsrl32(kScratchReg, i.OutputRegister(), 31);
    __ srl(at, i.OutputRegister(), 31);
    __ xor_(result, kScratchReg, at);
    if (cc == eq)  // Toggle result for not overflow.
      __ xori(result, result, 1);
    return;
  } else if (instr->arch_opcode() == kMips64DaddOvf ||
             instr->arch_opcode() == kMips64DsubOvf) {
    Label flabel, tlabel;
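    // DaddBranchNoOvf/DsubBranchNoOvf branch to flabel when no overflow
    // occurs, so result ends up as 1 only on the fall-through (overflow)
    // path; the condition itself is not consulted here.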
    switch (instr->arch_opcode()) {
      case kMips64DaddOvf:
        __ DaddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                           i.InputOperand(1), &flabel);
        break;
      case kMips64DsubOvf:
        __ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                           i.InputOperand(1), &flabel);
        break;
      default:
        UNREACHABLE();
        break;
    }
    __ li(result, 1);
    __ Branch(&tlabel);
    __ bind(&flabel);
    __ li(result, 0);
    __ bind(&tlabel);
  } else if (instr->arch_opcode() == kMips64Cmp) {
    cc = FlagsConditionToConditionCmp(condition);
    switch (cc) {
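      // MIPS only provides set-on-less-than (Slt/Sltu), so the remaining
      // conditions are synthesized by swapping the operands and/or inverting
      // the 0/1 result.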
      case eq:
      case ne: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        Register select;
        if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
          // Pass left operand if right is zero.
          select = left;
        } else {
          __ Dsubu(kScratchReg, left, right);
          select = kScratchReg;
        }
        __ Sltu(result, zero_reg, select);
        if (cc == eq) {
          // Sltu produces 0 for equality, invert the result.
          __ xori(result, result, 1);
        }
      } break;
      case lt:
      case ge: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Slt(result, left, right);
        if (cc == ge) {
          __ xori(result, result, 1);
        }
      } break;
      case gt:
      case le: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Slt(result, left, right);
        if (cc == le) {
          __ xori(result, result, 1);
        }
      } break;
      case lo:
      case hs: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Sltu(result, left, right);
        if (cc == hs) {
          __ xori(result, result, 1);
        }
      } break;
      case hi:
      case ls: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Sltu(result, left, right);
        if (cc == ls) {
          __ xori(result, result, 1);
        }
      } break;
      default:
        UNREACHABLE();
    }
    return;
  } else if (instr->arch_opcode() == kMips64CmpD ||
             instr->arch_opcode() == kMips64CmpS) {
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    bool predicate;
    FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
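    // Pre-r6 cores set the FCSR condition flag via c.cond.fmt and use
    // Movf/Movt to conditionally clear the preset 1; r6 cores write an
    // all-ones/all-zeros mask into an FPU register via cmp.cond.fmt instead.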
    if (kArchVariant != kMips64r6) {
      __ li(result, Operand(1));
      if (instr->arch_opcode() == kMips64CmpD) {
        __ c(cc, D, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMips64CmpS);
        __ c(cc, S, left, right);
      }
      if (predicate) {
        __ Movf(result, zero_reg);
      } else {
        __ Movt(result, zero_reg);
      }
    } else {
      if (instr->arch_opcode() == kMips64CmpD) {
        __ cmp(cc, L, kDoubleCompareReg, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMips64CmpS);
        __ cmp(cc, W, kDoubleCompareReg, left, right);
      }
      __ dmfc1(result, kDoubleCompareReg);
      __ andi(result, result, 1);  // Cmp returns all 1's/0's, use only LSB.

      if (!predicate)  // Invert the result for a negated predicate.
        __ xori(result, result, 1);
    }
    return;
  } else {
    PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    TRACE_UNIMPL();
    UNIMPLEMENTED();
  }
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
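  // Inputs are laid out as: value, default block, then (case value, target
  // block) pairs; emit a linear sequence of compare-and-branch.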
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ li(at, Operand(i.InputInt32(index + 0)));
    __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
  }
  __ nop();  // Branch delay slot of the last beq.
  AssembleArchJump(i.InputRpo(1));
}


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  Label here;

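  // Out-of-range indices take the default target. Otherwise bal captures the
  // current PC in ra, the index is scaled by 8 (dsll ... 3), and the sum
  // addresses one of the 8-byte label entries emitted with dd() below.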
  __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
  __ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
  // Ensure that dd-ed labels use 8 byte aligned addresses.
  __ Align(8);
  __ bal(&here);
  __ dsll(at, input, 3);  // Branch delay slot.
  __ bind(&here);
  __ daddu(at, at, ra);
  __ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
  __ jr(at);
  __ nop();  // Branch delay slot nop.
  for (size_t index = 0; index < case_count; ++index) {
    __ dd(GetLabel(i.InputRpo(index + 2)));
  }
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->IsCFunctionCall()) {
    __ Push(ra, fp);
    __ mov(fp, sp);
  } else if (descriptor->IsJSFunctionCall()) {
    __ Prologue(this->info()->GeneratePreagedPrologue());
  } else if (frame()->needs_frame()) {
    __ StubPrologue();
  } else {
    frame()->SetElidedFrameSizeInSlots(0);
  }
  frame_access_state()->SetFrameAccessToDefault();

  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  if (stack_shrink_slots > 0) {
    __ Dsubu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
  }

  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    // Save callee-saved FPU registers.
    __ MultiPushFPU(saves_fpu);
    int count = base::bits::CountPopulation32(saves_fpu);
    DCHECK(kNumCalleeSavedFPU == count);
    frame()->AllocateSavedCalleeRegisterSlots(count *
                                              (kDoubleSize / kPointerSize));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // Save callee-saved registers.
    __ MultiPush(saves);
    // kNumCalleeSaved includes the fp register, but the fp register
    // is saved separately in TurboFan.
    int count = base::bits::CountPopulation32(saves);
    DCHECK(kNumCalleeSaved == count + 1);
    frame()->AllocateSavedCalleeRegisterSlots(count);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore GP registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore FPU registers.
  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    __ MultiPopFPU(saves_fpu);
  }

  if (descriptor->IsCFunctionCall()) {
    __ mov(sp, fp);
    __ Pop(ra, fp);
  } else if (frame()->needs_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ Branch(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      __ mov(sp, fp);
      __ Pop(ra, fp);
    }
  }
  int pop_count = static_cast<int>(descriptor->StackParameterCount());
  if (pop_count != 0) {
    __ DropAndRet(pop_count);
  } else {
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ sd(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ld(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ld(temp, src);
      __ sd(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ li(dst, Operand(src.ToInt32()));
          break;
        case Constant::kFloat32:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kInt64:
          __ li(dst, Operand(src.ToInt64()));
          break;
        case Constant::kFloat64:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ li(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          int offset;
          if (IsMaterializableFromFrame(src_object, &offset)) {
            __ ld(dst, MemOperand(fp, offset));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ li(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
          break;
      }
      if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
        __ sw(at, dst);
      } else {
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64());
      if (destination->IsDoubleStackSlot()) {
        __ sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ sdc1(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(g.ToDoubleRegister(destination), src);
    } else {
      FPURegister temp = kScratchDoubleReg;
      __ ldc1(temp, src);
      __ sdc1(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ld(src, dst);
      __ sd(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kScratchReg2;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ld(temp_0, src);
    __ ld(temp_1, dst);
    __ sd(temp_0, dst);
    __ sd(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    FPURegister temp = kScratchDoubleReg;
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ ldc1(src, dst);
      __ sdc1(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rm(), src0.offset() + kIntSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
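    // Park the destination slot in the FPU temp, copy the source over it one
    // 32-bit word at a time through the GP temp, then store the FPU temp back
    // to the source slot to complete the swap.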
    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
    __ sw(temp_0, dst0);
    __ lw(temp_0, src1);
    __ sw(temp_0, dst1);
    __ sdc1(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit MIPS we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // Unused on 32-bit ARM. Still exists on 64-bit ARM.
  // TODO(plind): Unclear when this is called now. Understand and fix if
  // needed.
  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block trampoline pool emission for the duration of the padding.
    v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
        masm());
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= v8::internal::Assembler::kInstrSize;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8