// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/mips64/macro-assembler-mips64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble


// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg) \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED code_generator_mips64: %s at line %d\n", \
         __FUNCTION__, __LINE__)


// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // Single (Float) and Double register namespace is same on MIPS,
    // both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  Register InputOrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK((InputInt32(index) == 0));
      return zero_reg;
    }
    return InputRegister(index);
  }

  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputDoubleRegister(index);
  }

  DoubleRegister InputOrZeroSingleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputSingleRegister(index);
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        // maybe not done on arm due to const pool ??
        break;
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
    return Operand(zero_reg);
  }

  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
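
// Typical use of the converter inside AssembleArchInstruction() below: a
// MipsOperandConverter is constructed per instruction and maps virtual
// operands onto registers, immediates and memory operands, e.g.
//   MipsOperandConverter i(this, instr);
//   __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));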


static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {

class OutOfLineLoadSingle final : public OutOfLineCode {
 public:
  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  FloatRegister const result_;
};


class OutOfLineLoadDouble final : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ mov(result_, zero_reg); }

 private:
  Register const result_;
};


class OutOfLineRound : public OutOfLineCode {
 public:
  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of double input already in kScratchReg.
    __ dsrl(at, kScratchReg, 31);
    __ dsll(at, at, 31);
    __ mthc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineRound32 : public OutOfLineCode {
 public:
  OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of float input already in kScratchReg.
    __ srl(at, kScratchReg, 31);
    __ sll(at, at, 31);
    __ mtc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore ra if the frame was elided.
      __ Push(ra);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    __ Daddu(scratch1_, object_, index_);
    __ CallStub(&stub);
    if (must_save_lr_) {
      __ Pop(ra);
    }
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
};
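
// Note: kArchStoreWithWriteBarrier (below) emits the store plus a single
// inline CheckPageFlag on the object as its fast path; only stores that
// might create an interesting pointer reach this out-of-line code, which
// applies the remaining filters (smi check, page flag on the value) before
// calling the RecordWriteStub.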


Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


Condition FlagsConditionToConditionTst(FlagsCondition condition) {
  switch (condition) {
    case kNotEqual:
      return ne;
    case kEqual:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
  switch (condition) {
    case kOverflow:
      return ne;
    case kNotOverflow:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
                                             FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      predicate = true;
      return EQ;
    case kNotEqual:
      predicate = false;
      return EQ;
    case kUnsignedLessThan:
      predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      predicate = false;
      return ULT;
    case kUnsignedLessThanOrEqual:
      predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      predicate = false;
      return ULE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      predicate = true;
      break;
    default:
      predicate = true;
      break;
  }
  UNREACHABLE();
  return kNoFPUCondition;
}
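
// The FPU mapping reports its result in two parts: the hardware compare
// condition, plus a `predicate` flag saying whether to branch when the
// compare bit is set (true) or clear (false). For example,
// kUnsignedGreaterThanOrEqual maps to ULT with predicate == false, i.e.
// "not (unordered or less than)", which is >= with NaN handled correctly.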

}  // namespace

#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
  do { \
    auto result = i.Output##width##Register(); \
    auto ool = new (zone()) OutOfLineLoad##width(this, result); \
    if (instr->InputAt(0)->IsRegister()) { \
      auto offset = i.InputRegister(0); \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ And(kScratchReg, offset, Operand(0xffffffff)); \
      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
      __ asm_instr(result, MemOperand(kScratchReg, 0)); \
    } else { \
      int offset = static_cast<int>(i.InputOperand(0).immediate()); \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
    } \
    __ bind(ool->exit()); \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
  do { \
    auto result = i.OutputRegister(); \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
    if (instr->InputAt(0)->IsRegister()) { \
      auto offset = i.InputRegister(0); \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ And(kScratchReg, offset, Operand(0xffffffff)); \
      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
      __ asm_instr(result, MemOperand(kScratchReg, 0)); \
    } else { \
      int offset = static_cast<int>(i.InputOperand(0).immediate()); \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
    } \
    __ bind(ool->exit()); \
  } while (0)
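
// Shared pattern of the checked load/store macros: the access is guarded by
// an unsigned compare of the offset against the buffer length. With a
// register offset, `hs` (offset >= length) branches out of line while the
// delay slot zero-extends the offset to 32 bits; with an immediate offset
// the compare operands are swapped, so `ls` (length <= offset) is the
// out-of-bounds condition.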

#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
  do { \
    Label done; \
    if (instr->InputAt(0)->IsRegister()) { \
      auto offset = i.InputRegister(0); \
      auto value = i.InputOrZero##width##Register(2); \
      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
        __ Move(kDoubleRegZero, 0.0); \
      } \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ And(kScratchReg, offset, Operand(0xffffffff)); \
      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
      __ asm_instr(value, MemOperand(kScratchReg, 0)); \
    } else { \
      int offset = static_cast<int>(i.InputOperand(0).immediate()); \
      auto value = i.InputOrZero##width##Register(2); \
      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
        __ Move(kDoubleRegZero, 0.0); \
      } \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
    } \
    __ bind(&done); \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do { \
    Label done; \
    if (instr->InputAt(0)->IsRegister()) { \
      auto offset = i.InputRegister(0); \
      auto value = i.InputOrZeroRegister(2); \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ And(kScratchReg, offset, Operand(0xffffffff)); \
      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
      __ asm_instr(value, MemOperand(kScratchReg, 0)); \
    } else { \
      int offset = static_cast<int>(i.InputOperand(0).immediate()); \
      auto value = i.InputOrZeroRegister(2); \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
    } \
    __ bind(&done); \
  } while (0)

#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
  if (kArchVariant == kMips64r6) { \
    __ cfc1(kScratchReg, FCSR); \
    __ li(at, Operand(mode_##mode)); \
    __ ctc1(at, FCSR); \
    __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    __ ctc1(kScratchReg, FCSR); \
  } else { \
    auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
    Label done; \
    __ mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
    __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
           HeapNumber::kExponentBits); \
    __ Branch(USE_DELAY_SLOT, &done, hs, at, \
              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    __ dmfc1(at, i.OutputDoubleRegister()); \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
    __ bind(ool->exit()); \
    __ bind(&done); \
  }

#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
  if (kArchVariant == kMips64r6) { \
    __ cfc1(kScratchReg, FCSR); \
    __ li(at, Operand(mode_##mode)); \
    __ ctc1(at, FCSR); \
    __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    __ ctc1(kScratchReg, FCSR); \
  } else { \
    int32_t kFloat32ExponentBias = 127; \
    int32_t kFloat32MantissaBits = 23; \
    int32_t kFloat32ExponentBits = 8; \
    auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
    Label done; \
    __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
    __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
    __ Branch(USE_DELAY_SLOT, &done, hs, at, \
              Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
    __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    __ mfc1(at, i.OutputDoubleRegister()); \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
    __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
    __ bind(ool->exit()); \
    __ bind(&done); \
  }
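
// The pre-r6 paths above round in three steps: (1) if the biased exponent
// shows the magnitude is already >= 2^52 (2^23 for floats), the value is
// integral and is returned unchanged; (2) otherwise round through the
// integer format and convert back; (3) if the rounded value is zero, the
// OutOfLineRound code re-installs the sign bit from the saved high word so
// that results like -0.5 -> -0.0 keep their sign.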

#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
  do { \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    __ sync(); \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
  do { \
    __ sync(); \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    __ sync(); \
  } while (0)
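
// The sync() placement is the usual MIPS mapping for sequentially
// consistent atomics: a load followed by a full barrier, and a store
// bracketed by barriers so it can move neither above earlier accesses nor
// below later ones.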

#define ASSEMBLE_IEEE754_BINOP(name) \
  do { \
    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 2, kScratchReg); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
                     0, 2); \
    /* Move the result in the double result register. */ \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name) \
  do { \
    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 1, kScratchReg); \
    __ MovToFloatParameter(i.InputDoubleRegister(0)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
                     0, 1); \
    /* Move the result in the double result register. */ \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
  } while (0)
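
// As an example, ASSEMBLE_IEEE754_UNOP(log) expands to a C call to
// ExternalReference::ieee754_log_function(isolate()), passing the input in
// the FP parameter register and fetching the result from the FP return
// register; the kIeee754Float64* cases below are all emitted this way.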

void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(sp, fp);
  __ Pop(ra, fp);
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Branch(&done, ne, scratch3,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ ld(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}

namespace {

void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    masm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    masm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot);
}
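
// Note on the two hooks above: before a tail call's gap moves the stack may
// only grow (shrinkage is disabled), so every slot the moves write to
// already exists; after the gap resolver has run, the stack pointer is
// allowed to shrink back to exactly first_unused_stack_slot.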

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Call(at);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Jump(at);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      __ Jump(i.InputRegister(0));
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }
      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(at);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }
      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                       i.TempRegister(0), i.TempRegister(1),
                                       i.TempRegister(2));
      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(at);
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchDebugBreak:
      __ stop("kArchDebugBreak");
      break;
    case kArchComment: {
      Address comment_string = i.InputExternalReference(0).address();
      __ RecordComment(reinterpret_cast<const char*>(comment_string));
      break;
    }
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      CodeGenResult result = AssembleDeoptimizerCall(
          deopt_state_id, bailout_type, current_source_position_);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ ld(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
                                                   scratch0, scratch1, mode);
      __ Daddu(at, object, index);
      __ sd(value, MemOperand(at));
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      __ Daddu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
               Operand(offset.offset()));
      break;
    }
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
      __ CallStub(&stub);
      break;
    }
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kMips64Add:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dadd:
      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DaddOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMips64Sub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dsub:
      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DsubOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMips64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMips64MulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DMulHigh:
      __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Div:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Mod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64ModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmul:
      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Ddiv:
      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DdivU:
      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Dmod:
      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DmodU:
      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dlsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputInt8(2));
      break;
    case kMips64Lsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputInt8(2));
      break;
    case kMips64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64And32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      }
      break;
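    // Note: the `sll reg, reg, 0x0` instructions in the 32-bit logical cases
    // are not no-ops. On MIPS64 a 32-bit shift by zero sign-extends the low
    // word into the full 64-bit register, canonicalizing the operands before
    // the 64-bit And/Or/Nor/Xor below.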
    case kMips64Or:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kMips64Nor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK(i.InputOperand(1).immediate() == 0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Nor32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK(i.InputOperand(1).immediate() == 0);
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Xor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Xor32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kMips64Clz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Dclz:
      __ dclz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Ctz: {
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      Label skip_for_zero;
      Label end;
      // Branch if the operand is zero.
      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
      // Find the number of bits before the last bit set to 1.
      __ Subu(reg2, zero_reg, i.InputRegister(0));
      __ And(reg2, reg2, i.InputRegister(0));
      __ clz(reg2, reg2);
      // Get the number of bits after the last bit set to 1.
      __ li(reg1, 0x1F);
      __ Subu(i.OutputRegister(), reg1, reg2);
      __ Branch(&end);
      __ bind(&skip_for_zero);
      // If the operand is zero, return word length as the result.
      __ li(i.OutputRegister(), 0x20);
      __ bind(&end);
    } break;
    case kMips64Dctz: {
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      Label skip_for_zero;
      Label end;
      // Branch if the operand is zero.
      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
      // Find the number of bits before the last bit set to 1.
      __ Dsubu(reg2, zero_reg, i.InputRegister(0));
      __ And(reg2, reg2, i.InputRegister(0));
      __ dclz(reg2, reg2);
      // Get the number of bits after the last bit set to 1.
      __ li(reg1, 0x3F);
      __ Subu(i.OutputRegister(), reg1, reg2);
      __ Branch(&end);
      __ bind(&skip_for_zero);
      // If the operand is zero, return word length as the result.
      __ li(i.OutputRegister(), 0x40);
      __ bind(&end);
    } break;
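    // The two cases above derive count-trailing-zeros from count-leading-
    // zeros: `x & -x` isolates the lowest set bit, so for nonzero x,
    // ctz(x) == 31 - clz(x & -x) (or 63 - dclz(...) for 64 bits). Example:
    // x = 0b01100 -> x & -x = 0b00100, clz = 29, ctz = 31 - 29 = 2.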
    case kMips64Popcnt: {
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      uint32_t m1 = 0x55555555;
      uint32_t m2 = 0x33333333;
      uint32_t m4 = 0x0f0f0f0f;
      uint32_t m8 = 0x00ff00ff;
      uint32_t m16 = 0x0000ffff;

      // Put count of ones in every 2 bits into those 2 bits.
      __ li(at, m1);
      __ dsrl(reg1, i.InputRegister(0), 1);
      __ And(reg2, i.InputRegister(0), at);
      __ And(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 4 bits into those 4 bits.
      __ li(at, m2);
      __ dsrl(reg2, reg1, 2);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 8 bits into those 8 bits.
      __ li(at, m4);
      __ dsrl(reg2, reg1, 4);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 16 bits into those 16 bits.
      __ li(at, m8);
      __ dsrl(reg2, reg1, 8);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Calculate total number of ones.
      __ li(at, m16);
      __ dsrl(reg2, reg1, 16);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ Daddu(i.OutputRegister(), reg1, reg2);
    } break;
    case kMips64Dpopcnt: {
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      uint64_t m1 = 0x5555555555555555;
      uint64_t m2 = 0x3333333333333333;
      uint64_t m4 = 0x0f0f0f0f0f0f0f0f;
      uint64_t m8 = 0x00ff00ff00ff00ff;
      uint64_t m16 = 0x0000ffff0000ffff;
      uint64_t m32 = 0x00000000ffffffff;

      // Put count of ones in every 2 bits into those 2 bits.
      __ li(at, m1);
      __ dsrl(reg1, i.InputRegister(0), 1);
      __ and_(reg2, i.InputRegister(0), at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 4 bits into those 4 bits.
      __ li(at, m2);
      __ dsrl(reg2, reg1, 2);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 8 bits into those 8 bits.
      __ li(at, m4);
      __ dsrl(reg2, reg1, 4);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 16 bits into those 16 bits.
      __ li(at, m8);
      __ dsrl(reg2, reg1, 8);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 32 bits into those 32 bits.
      __ li(at, m16);
      __ dsrl(reg2, reg1, 16);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Calculate total number of ones.
      __ li(at, m32);
      __ dsrl32(reg2, reg1, 0);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(i.OutputRegister(), reg1, reg2);
    } break;
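    // Both popcount cases use the classic SWAR divide-and-conquer reduction
    // (as in Hacker's Delight): each round masks and adds adjacent k-bit
    // partial sums into 2k-bit sums, so the 32-bit count finishes in five
    // mask-shift-add rounds and the 64-bit count in six.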
    case kMips64Shl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Shr:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ srl(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Sar:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sra(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Ext:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMips64Ins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
    case kMips64Dext: {
      int16_t pos = i.InputInt8(1);
      int16_t size = i.InputInt8(2);
      if (size > 0 && size <= 32 && pos >= 0 && pos < 32) {
        __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      } else if (size > 32 && size <= 64 && pos > 0 && pos < 32) {
        __ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                 i.InputInt8(2));
      } else {
        DCHECK(size > 0 && size <= 32 && pos >= 32 && pos < 64);
        __ Dextu(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                 i.InputInt8(2));
      }
      break;
    }
    case kMips64Dins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      }
      break;
    case kMips64Dshl:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsll(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsll32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
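    // The dsll/dsll32 split here (and dsrl/dsrl32, dsra/dsra32 below) exists
    // because MIPS shift encodings carry only a 5-bit shift amount; the *32
    // forms implicitly add 32, covering immediate shifts of 32..63.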
    case kMips64Dshr:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsrl(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsrl32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dsar:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Ror:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dror:
      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Tst:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Cmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Mov:
      // TODO(plind): Should we combine mov/li like this, or use separate instr?
      // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

    case kMips64CmpS:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMips64AddS:
      // TODO(plind): add special case: combine mult & add.
      __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubS:
      __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulS:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivS:
      __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModS: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputSingleRegister());
      break;
    }
    case kMips64AbsS:
      __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64NegS:
      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64SqrtS: {
      __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxS:
      __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinS:
      __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64CmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMips64AddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MaddS:
      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
                i.InputFloatRegister(1), i.InputFloatRegister(2));
      break;
    case kMips64MaddD:
      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
      break;
    case kMips64MaddfS:
      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
                 i.InputFloatRegister(2));
      break;
    case kMips64MaddfD:
      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(2));
      break;
    case kMips64MsubS:
      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
                i.InputFloatRegister(1), i.InputFloatRegister(2));
      break;
    case kMips64MsubD:
      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
      break;
    case kMips64MsubfS:
      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
                 i.InputFloatRegister(2));
      break;
    case kMips64MsubfD:
      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(2));
      break;
    case kMips64MulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMips64AbsD:
      __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64NegD:
      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64SqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxD:
      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinD:
      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64Float64RoundDown: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
      break;
    }
    case kMips64Float32RoundDown: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
      break;
    }
    case kMips64Float64RoundTruncate: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
      break;
    }
    case kMips64Float32RoundTruncate: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
      break;
    }
    case kMips64Float64RoundUp: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
      break;
    }
    case kMips64Float32RoundUp: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
      break;
    }
    case kMips64Float64RoundTiesEven: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
      break;
    }
    case kMips64Float32RoundTiesEven: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
      break;
    }
    case kMips64Float32Max: {
      Label compare_nan, done_compare;
      __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
                       i.InputSingleRegister(1), &compare_nan);
      __ Branch(&done_compare);
      __ bind(&compare_nan);
      __ Move(i.OutputSingleRegister(),
              std::numeric_limits<float>::quiet_NaN());
      __ bind(&done_compare);
      break;
    }
    case kMips64Float64Max: {
      Label compare_nan, done_compare;
      __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                       i.InputDoubleRegister(1), &compare_nan);
      __ Branch(&done_compare);
      __ bind(&compare_nan);
      __ Move(i.OutputDoubleRegister(),
              std::numeric_limits<double>::quiet_NaN());
      __ bind(&done_compare);
      break;
    }
    case kMips64Float32Min: {
      Label compare_nan, done_compare;
      __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
                       i.InputSingleRegister(1), &compare_nan);
      __ Branch(&done_compare);
      __ bind(&compare_nan);
      __ Move(i.OutputSingleRegister(),
              std::numeric_limits<float>::quiet_NaN());
      __ bind(&done_compare);
      break;
    }
    case kMips64Float64Min: {
      Label compare_nan, done_compare;
      __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                       i.InputDoubleRegister(1), &compare_nan);
      __ Branch(&done_compare);
      __ bind(&compare_nan);
      __ Move(i.OutputDoubleRegister(),
              std::numeric_limits<double>::quiet_NaN());
      __ bind(&done_compare);
      break;
    }
    case kMips64Float64SilenceNaN:
      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtSD:
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtDS:
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64CvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_s_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSUw: {
      __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_s_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_d_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDUw: {
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtDUl: {
      __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSUl: {
      __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64FloorWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64FloorWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ trunc_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
      // because INT32_MIN allows easier out-of-bounds detection.
      __ addiu(kScratchReg, i.OutputRegister(), 1);
      __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
      __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
      break;
    }
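    // In the kMips64TruncWS sequence above, addiu/slt detect the hardware
    // overflow result INT32_MAX: only INT32_MAX + 1 wraps to a smaller
    // signed value, so kScratchReg2 is 1 exactly in that case and Movn then
    // substitutes the wrapped value INT32_MIN.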
    case kMips64TruncLS: {
      FPURegister scratch = kScratchDoubleReg;
      Register tmp_fcsr = kScratchReg;
      Register result = kScratchReg2;

      bool load_status = instr->OutputCount() > 1;
      if (load_status) {
        // Save FCSR.
        __ cfc1(tmp_fcsr, FCSR);
        // Clear FPU flags.
        __ ctc1(zero_reg, FCSR);
      }
      // Other arches use round to zero here, so we follow.
      __ trunc_l_s(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(), scratch);
      if (load_status) {
        __ cfc1(result, FCSR);
        // Check for overflow and NaNs.
        __ andi(result, result,
                (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
        __ Slt(result, zero_reg, result);
        __ xori(result, result, 1);
        __ mov(i.OutputRegister(1), result);
        // Restore FCSR.
        __ ctc1(tmp_fcsr, FCSR);
      }
      break;
    }
    case kMips64TruncLD: {
      FPURegister scratch = kScratchDoubleReg;
      Register tmp_fcsr = kScratchReg;
      Register result = kScratchReg2;

      bool load_status = instr->OutputCount() > 1;
      if (load_status) {
        // Save FCSR.
        __ cfc1(tmp_fcsr, FCSR);
        // Clear FPU flags.
        __ ctc1(zero_reg, FCSR);
      }
      // Other arches use round to zero here, so we follow.
      __ trunc_l_d(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(0), scratch);
      if (load_status) {
        __ cfc1(result, FCSR);
        // Check for overflow and NaNs.
        __ andi(result, result,
                (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
        __ Slt(result, zero_reg, result);
        __ xori(result, result, 1);
        __ mov(i.OutputRegister(1), result);
        // Restore FCSR.
        __ ctc1(tmp_fcsr, FCSR);
      }
      break;
    }
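    // When a second output is requested, the FCSR handling above produces a
    // success bit: the masked overflow/invalid-operation flags are reduced
    // via Slt/xori so OutputRegister(1) holds 1 for a clean conversion and 0
    // when the input overflowed or was NaN.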
    case kMips64TruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncUwS: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
      __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
      // because 0 allows easier out-of-bounds detection.
      __ addiu(kScratchReg, i.OutputRegister(), 1);
      __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
      break;
    }
    case kMips64TruncUlS: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      // TODO(plind): Fix wrong param order of Trunc_ul_s() macro-asm function.
      __ Trunc_ul_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch,
                    result);
      break;
    }
    case kMips64TruncUlD: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      // TODO(plind): Fix wrong param order of Trunc_ul_d() macro-asm function.
      __ Trunc_ul_d(i.InputDoubleRegister(0), i.OutputRegister(0), scratch,
                    result);
      break;
    }
1667 case kMips64BitcastDL:
1668 __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
1669 break;
1670 case kMips64BitcastLD:
1671 __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
1672 break;
1673 case kMips64Float64ExtractLowWord32:
1674 __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
1675 break;
1676 case kMips64Float64ExtractHighWord32:
1677 __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
1678 break;
1679 case kMips64Float64InsertLowWord32:
1680 __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
1681 break;
1682 case kMips64Float64InsertHighWord32:
1683 __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
1684 break;
1685 // ... more basic instructions ...
1686
1687 case kMips64Seb:
1688 __ seb(i.OutputRegister(), i.InputRegister(0));
1689 break;
1690 case kMips64Seh:
1691 __ seh(i.OutputRegister(), i.InputRegister(0));
1692 break;
1693 case kMips64Lbu:
1694 __ lbu(i.OutputRegister(), i.MemoryOperand());
1695 break;
1696 case kMips64Lb:
1697 __ lb(i.OutputRegister(), i.MemoryOperand());
1698 break;
1699 case kMips64Sb:
1700 __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
1701 break;
1702 case kMips64Lhu:
1703 __ lhu(i.OutputRegister(), i.MemoryOperand());
1704 break;
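    // The capitalized U* helpers below assemble unaligned accesses; on
    // pre-R6 cores they are expected to expand to partial-word (lwl/lwr
    // style) sequences, while R6 handles unaligned addresses natively.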
    case kMips64Ulhu:
      __ Ulhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lh:
      __ lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulh:
      __ Ulh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sh:
      __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Ush:
      __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
      break;
    case kMips64Lw:
      __ lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulw:
      __ Ulw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lwu:
      __ lwu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulwu:
      __ Ulwu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ld:
      __ ld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Uld:
      __ Uld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sw:
      __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Usw:
      __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Sd:
      __ sd(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Usd:
      __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Lwc1: {
      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    }
    case kMips64Ulwc1: {
      __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    }
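    // FP stores of the zero constant reuse kDoubleRegZero; the register is
    // materialized lazily on first use and tracked via IsDoubleZeroRegSet().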
    case kMips64Swc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ swc1(ft, operand);
      break;
    }
    case kMips64Uswc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Uswc1(ft, operand, kScratchReg);
      break;
    }
    case kMips64Ldc1:
      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMips64Uldc1:
      __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    case kMips64Sdc1: {
      FPURegister ft = i.InputOrZeroDoubleRegister(2);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ sdc1(ft, i.MemoryOperand());
      break;
    }
    case kMips64Usdc1: {
      FPURegister ft = i.InputOrZeroDoubleRegister(2);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
      break;
    }
    case kMips64Push:
      if (instr->InputAt(0)->IsFPRegister()) {
        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
        __ Subu(sp, sp, Operand(kDoubleSize));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kMips64StackClaim: {
      __ Dsubu(sp, sp, Operand(i.InputInt32(0)));
      frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
      break;
    }
    case kMips64StoreToStackSlot: {
      if (instr->InputAt(0)->IsFPRegister()) {
        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
      } else {
        __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
      }
      break;
    }
    case kMips64ByteSwap64: {
      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
      break;
    }
    case kMips64ByteSwap32: {
      __ ByteSwapUnsigned(i.OutputRegister(0), i.InputRegister(0), 4);
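      // ByteSwapUnsigned leaves the swapped word in the upper 32 bits; the
      // logical shift by 32 brings it down zero-extended.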
      __ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
      break;
    }
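    // The ASSEMBLE_CHECKED_* macros, defined earlier in this file, guard the
    // access with a bounds check: out-of-range loads yield a default value
    // and out-of-range stores are skipped.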
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
      break;
    case kCheckedLoadWord64:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ld);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
      break;
    case kCheckedStoreWord64:
      ASSEMBLE_CHECKED_STORE_INTEGER(sd);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
      break;
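    // The ASSEMBLE_ATOMIC_* macros are expected to pair each plain access
    // with a sync barrier; word-sized and narrower aligned accesses are
    // themselves atomic on MIPS.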
    case kAtomicLoadInt8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
      break;
    case kAtomicLoadUint8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
      break;
    case kAtomicLoadInt16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
      break;
    case kAtomicLoadUint16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
      break;
    case kAtomicLoadWord32:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
      break;
    case kAtomicStoreWord8:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
      break;
    case kAtomicStoreWord16:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
      break;
    case kAtomicStoreWord32:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
      break;
    case kMips64AssertEqual:
      __ Assert(eq, static_cast<BailoutReason>(i.InputOperand(2).immediate()),
                i.InputRegister(0), Operand(i.InputRegister(1)));
      break;
  }
  return kSuccess;
}  // NOLINT(readability/fn_size)


#define UNSUPPORTED_COND(opcode, condition)                                  \
  OFStream out(stdout);                                                      \
  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
  UNIMPLEMENTED();

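// Maps the FlagsConditions that can appear on floating-point compares onto
// assembler Conditions; returns false for conditions the FP compare branches
// below do not support.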
static bool convertCondition(FlagsCondition condition, Condition& cc) {
  switch (condition) {
    case kEqual:
      cc = eq;
      return true;
    case kNotEqual:
      cc = ne;
      return true;
    case kUnsignedLessThan:
      cc = lt;
      return true;
    case kUnsignedGreaterThanOrEqual:
      cc = uge;
      return true;
    case kUnsignedLessThanOrEqual:
      cc = le;
      return true;
    case kUnsignedGreaterThan:
      cc = ugt;
      return true;
    default:
      break;
  }
  return false;
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  MipsOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are handled here by
  // branch instructions that do the actual comparison. It is essential that
  // the input registers of a compare pseudo-op are not modified before this
  // branch op, as they are tested here.

  if (instr->arch_opcode() == kMips64Tst) {
    cc = FlagsConditionToConditionTst(branch->condition);
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(tlabel, cc, at, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    cc = FlagsConditionToConditionOvf(branch->condition);
    __ dsra32(kScratchReg, i.OutputRegister(), 0);
    __ sra(at, i.OutputRegister(), 31);
    __ Branch(tlabel, cc, at, Operand(kScratchReg));
  } else if (instr->arch_opcode() == kMips64DaddOvf) {
    switch (branch->condition) {
      case kOverflow:
        __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMips64DsubOvf) {
    switch (branch->condition) {
      case kOverflow:
        __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMips64MulOvf) {
    switch (branch->condition) {
      case kOverflow: {
        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), tlabel, flabel, kScratchReg);
      } break;
      case kNotOverflow: {
        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), flabel, tlabel, kScratchReg);
      } break;
      default:
        UNSUPPORTED_COND(kMips64MulOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMips64Cmp) {
    cc = FlagsConditionToConditionCmp(branch->condition);
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
  } else if (instr->arch_opcode() == kMips64CmpS) {
    if (!convertCondition(branch->condition, cc)) {
      UNSUPPORTED_COND(kMips64CmpS, branch->condition);
    }
    FPURegister left = i.InputOrZeroSingleRegister(0);
    FPURegister right = i.InputOrZeroSingleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF32(tlabel, nullptr, cc, left, right);
  } else if (instr->arch_opcode() == kMips64CmpD) {
    if (!convertCondition(branch->condition, cc)) {
      UNSUPPORTED_COND(kMips64CmpD, branch->condition);
    }
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF64(tlabel, nullptr, cc, left, right);
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
  if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label false_value;
  DCHECK_NE(0u, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are checked and handled
  // here.

  if (instr->arch_opcode() == kMips64Tst) {
    cc = FlagsConditionToConditionTst(condition);
    if (instr->InputAt(1)->IsImmediate() &&
        base::bits::IsPowerOfTwo64(i.InputOperand(1).immediate())) {
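      // A power-of-two mask tests exactly one bit; extract that bit directly.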
      uint16_t pos =
          base::bits::CountTrailingZeros64(i.InputOperand(1).immediate());
      __ ExtractBits(result, i.InputRegister(0), pos, 1);
    } else {
      __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
      __ Sltu(result, zero_reg, kScratchReg);
    }
    if (cc == eq) {
      // Sltu produces 0 for equality, invert the result.
      __ xori(result, result, 1);
    }
    return;
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    cc = FlagsConditionToConditionOvf(condition);
    // The overflow check produces 1 or 0 in result.
    __ dsrl32(kScratchReg, i.OutputRegister(), 31);
    __ srl(at, i.OutputRegister(), 31);
    __ xor_(result, kScratchReg, at);
    if (cc == eq)  // Toggle result for not overflow.
      __ xori(result, result, 1);
    return;
  } else if (instr->arch_opcode() == kMips64DaddOvf ||
             instr->arch_opcode() == kMips64DsubOvf ||
             instr->arch_opcode() == kMips64MulOvf) {
    Label flabel, tlabel;
    switch (instr->arch_opcode()) {
      case kMips64DaddOvf:
        __ DaddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                           i.InputOperand(1), &flabel);
        break;
      case kMips64DsubOvf:
        __ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                           i.InputOperand(1), &flabel);
        break;
      case kMips64MulOvf:
        __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), &flabel, kScratchReg);
        break;
      default:
        UNREACHABLE();
        break;
    }
    __ li(result, 1);
    __ Branch(&tlabel);
    __ bind(&flabel);
    __ li(result, 0);
    __ bind(&tlabel);
  } else if (instr->arch_opcode() == kMips64Cmp) {
    cc = FlagsConditionToConditionCmp(condition);
    switch (cc) {
      case eq:
      case ne: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        Register select;
        if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
          // Pass left operand if right is zero.
          select = left;
        } else {
          __ Dsubu(kScratchReg, left, right);
          select = kScratchReg;
        }
        __ Sltu(result, zero_reg, select);
        if (cc == eq) {
          // Sltu produces 0 for equality, invert the result.
          __ xori(result, result, 1);
        }
      } break;
      case lt:
      case ge: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Slt(result, left, right);
        if (cc == ge) {
          __ xori(result, result, 1);
        }
      } break;
      case gt:
      case le: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Slt(result, left, right);
        if (cc == le) {
          __ xori(result, result, 1);
        }
      } break;
      case lo:
      case hs: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Sltu(result, left, right);
        if (cc == hs) {
          __ xori(result, result, 1);
        }
      } break;
      case hi:
      case ls: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Sltu(result, left, right);
        if (cc == ls) {
          __ xori(result, result, 1);
        }
      } break;
      default:
        UNREACHABLE();
    }
    return;
  } else if (instr->arch_opcode() == kMips64CmpD ||
             instr->arch_opcode() == kMips64CmpS) {
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    bool predicate;
    FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
    if (kArchVariant != kMips64r6) {
      __ li(result, Operand(1));
      if (instr->arch_opcode() == kMips64CmpD) {
        __ c(cc, D, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMips64CmpS);
        __ c(cc, S, left, right);
      }
      if (predicate) {
        __ Movf(result, zero_reg);
      } else {
        __ Movt(result, zero_reg);
      }
    } else {
      if (instr->arch_opcode() == kMips64CmpD) {
        __ cmp(cc, L, kDoubleCompareReg, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMips64CmpS);
        __ cmp(cc, W, kDoubleCompareReg, left, right);
      }
      __ dmfc1(result, kDoubleCompareReg);
      __ andi(result, result, 1);  // Cmp returns all 1's/0's, use only LSB.

      if (!predicate)  // Toggle result for not equal.
        __ xori(result, result, 1);
    }
    return;
  } else {
    PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    TRACE_UNIMPL();
    UNIMPLEMENTED();
  }
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ li(at, Operand(i.InputInt32(index + 0)));
    __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
  }
  __ nop();  // Branch delay slot of the last beq.
  AssembleArchJump(i.InputRpo(1));
}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;

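  // Input 0 is the index, input 1 the default target, inputs 2.. the case
  // targets in order; out-of-range indices branch to the default before the
  // inline jump table is consulted.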
  __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
    return GetLabel(i.InputRpo(index + 2));
  });
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
    SourcePosition pos) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
  DeoptimizeReason deoptimization_reason =
      GetDeoptimizationReason(deoptimization_id);
  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
  return kSuccess;
}

void CodeGenerator::FinishFrame(Frame* frame) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    int count = base::bits::CountPopulation32(saves_fpu);
    DCHECK(kNumCalleeSavedFPU == count);
    frame->AllocateSavedCalleeRegisterSlots(count *
                                            (kDoubleSize / kPointerSize));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    int count = base::bits::CountPopulation32(saves);
    DCHECK(kNumCalleeSaved == count + 1);
    frame->AllocateSavedCalleeRegisterSlots(count);
  }
}

void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      __ Push(ra, fp);
      __ mov(fp, sp);
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue());
      if (descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
    }
  }

  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the
    // unoptimized frame is still on the stack. Optimized code uses OSR values
    // directly from the unoptimized frame. Thus, all that needs to be done is
    // to allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  if (shrink_slots > 0) {
    __ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
  }

  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    // Save callee-saved FPU registers.
    __ MultiPushFPU(saves_fpu);
    DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // Save callee-saved registers.
    __ MultiPush(saves);
    DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
  }
}

void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore GP registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore FPU registers.
  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    __ MultiPopFPU(saves_fpu);
  }

  MipsOperandConverter g(this, nullptr);
  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a
    // variable number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ Branch(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  int pop_count = static_cast<int>(descriptor->StackParameterCount());
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    Register pop_reg = g.ToRegister(pop);
    __ dsll(pop_reg, pop_reg, kPointerSizeLog2);
    __ Daddu(sp, sp, pop_reg);
  }
  if (pop_count != 0) {
    __ DropAndRet(pop_count);
  } else {
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ sd(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ld(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ld(temp, src);
      __ sd(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
            __ li(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ li(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kFloat32:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kInt64:
          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
            __ li(dst, Operand(src.ToInt64(), src.rmode()));
          } else {
            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
            __ li(dst, Operand(src.ToInt64()));
          }
          break;
        case Constant::kFloat64:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ li(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ li(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
          break;
      }
      if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsFPStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
          __ sw(zero_reg, dst);
        } else {
          __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
          __ sw(at, dst);
        }
      } else {
        DCHECK(destination->IsFPRegister());
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64());
      if (destination->IsFPStackSlot()) {
        __ sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      __ sdc1(src, g.ToMemOperand(destination));
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      __ ldc1(g.ToDoubleRegister(destination), src);
    } else {
      FPURegister temp = kScratchDoubleReg;
      __ ldc1(temp, src);
      __ sdc1(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ld(src, dst);
      __ sd(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kScratchReg2;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ld(temp_0, src);
    __ ld(temp_1, dst);
    __ sd(temp_0, dst);
    __ sd(temp_1, src);
  } else if (source->IsFPRegister()) {
    FPURegister temp = kScratchDoubleReg;
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ ldc1(src, dst);
      __ sdc1(temp, dst);
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rm(), src0.offset() + kIntSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
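    // Swap the 64-bit slots as two 32-bit words, using the FP scratch
    // register to park the destination double while the GP scratch copies
    // the source words across.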
    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
    __ sw(temp_0, dst0);
    __ lw(temp_0, src1);
    __ sw(temp_0, dst1);
    __ sdc1(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit MIPS we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block trampoline pool emission for the duration of the padding.
    v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
        masm());
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= v8::internal::Assembler::kInstrSize;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8