// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kCompareReg kLithiumScratchReg2
#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble


// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg)                                                      \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL()                                                       \
  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
         __LINE__)


// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // The single (float) and double register namespaces are the same on MIPS;
    // both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  Register InputOrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK((InputInt32(index) == 0));
      return zero_reg;
    }
    return InputRegister(index);
  }

  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputDoubleRegister(index);
  }

  DoubleRegister InputOrZeroSingleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputSingleRegister(index);
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //   maybe not done on arm due to const pool ??
        break;
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
    return Operand(zero_reg);
  }

  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}

namespace {

class OutOfLineLoadSingle final : public OutOfLineCode {
 public:
  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  FloatRegister const result_;
};


class OutOfLineLoadDouble final : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ mov(result_, zero_reg); }

 private:
  Register const result_;
};


class OutOfLineRound : public OutOfLineCode {
 public:
  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of double input already in kScratchReg.
    __ srl(at, kScratchReg, 31);
    __ sll(at, at, 31);
    __ Mthc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineRound32 : public OutOfLineCode {
 public:
  OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of float input already in kScratchReg.
    __ srl(at, kScratchReg, 31);
    __ sll(at, at, 31);
    __ mtc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};

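// Out-of-line portion of the write barrier used by kArchStoreWithWriteBarrier.
// The inline fast path only checks the stored-to object's page flags; this
// slow path filters out smis and values on uninteresting pages before calling
// the RecordWriteStub. ra must be saved/restored manually when the frame was
// elided, since the stub call clobbers it.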
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore ra if the frame was elided.
      __ Push(ra);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    __ Addu(scratch1_, object_, index_);
    __ CallStub(&stub);
    if (must_save_lr_) {
      __ Pop(ra);
    }
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
};


Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


Condition FlagsConditionToConditionTst(FlagsCondition condition) {
  switch (condition) {
    case kNotEqual:
      return ne;
    case kEqual:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

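// Maps a FlagsCondition to a MIPS FPU compare condition plus a branch
// polarity. The FPU compare sets a single condition flag; `predicate` tells
// the caller whether to branch when the flag is set (true) or clear (false).
// Inverting the branch polarity instead of the compare is what makes NaN
// operands (unordered compares) fall on the correct side: e.g. kNotEqual
// uses EQ with predicate=false, so NaNs compare "not equal".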
FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
                                             FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      predicate = true;
      return EQ;
    case kNotEqual:
      predicate = false;
      return EQ;
    case kUnsignedLessThan:
      predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      predicate = false;
      return ULT;
    case kUnsignedLessThanOrEqual:
      predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      predicate = false;
      return ULE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      predicate = true;
      break;
    default:
      predicate = true;
      break;
  }
  UNREACHABLE();
  return kNoFPUCondition;
}

}  // namespace

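// Checked loads guard the access against an out-of-bounds offset. With a
// register offset, branch to the out-of-line handler when offset >= length
// ('hs' is an unsigned >=), computing the effective address in the branch
// delay slot; with an immediate offset, branch when length <= offset ('ls'
// is an unsigned <=). The handler materializes the default value (NaN for
// floats, zero for integers) instead of loading.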
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
  do {                                                                        \
    auto result = i.Output##width##Register();                                \
    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ addu(kScratchReg, i.InputRegister(2), offset);                       \
      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
  do {                                                                        \
    auto result = i.OutputRegister();                                         \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ addu(kScratchReg, i.InputRegister(2), offset);                       \
      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.InputOrZero##width##Register(2);                  \
      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
        __ Move(kDoubleRegZero, 0.0);                                  \
      }                                                                \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ addu(kScratchReg, i.InputRegister(3), offset);                \
      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.InputOrZero##width##Register(2);                  \
      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
        __ Move(kDoubleRegZero, 0.0);                                  \
      }                                                                \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.InputOrZeroRegister(2);                           \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ addu(kScratchReg, i.InputRegister(3), offset);                \
      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.InputOrZeroRegister(2);                           \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

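// Rounding on pre-r6 hardware: if the biased exponent says the magnitude is
// at least 2^52 (2^23 for floats), the value is already integral and is
// passed through unchanged. Otherwise round via a 64-bit (32-bit) integer
// and convert back; a zero integer result diverts to the out-of-line code,
// which restores the sign bit saved in kScratchReg so that negative inputs
// rounding to zero yield -0.0. On r6, the rint instruction does all of this
// directly, under the FCSR rounding mode saved and restored around it.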
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode)                                  \
  if (IsMipsArchVariant(kMips32r6)) {                                          \
    __ cfc1(kScratchReg, FCSR);                                                \
    __ li(at, Operand(mode_##mode));                                           \
    __ ctc1(at, FCSR);                                                         \
    __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    __ ctc1(kScratchReg, FCSR);                                                \
  } else {                                                                     \
    auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister());    \
    Label done;                                                                \
    __ Mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
           HeapNumber::kExponentBits);                                         \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
    __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));         \
    __ Move(at, kScratchReg2, i.OutputDoubleRegister());                       \
    __ or_(at, at, kScratchReg2);                                              \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
    __ bind(ool->exit());                                                      \
    __ bind(&done);                                                            \
  }


#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode)                                   \
  if (IsMipsArchVariant(kMips32r6)) {                                         \
    __ cfc1(kScratchReg, FCSR);                                               \
    __ li(at, Operand(mode_##mode));                                          \
    __ ctc1(at, FCSR);                                                        \
    __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));            \
    __ ctc1(kScratchReg, FCSR);                                               \
  } else {                                                                    \
    int32_t kFloat32ExponentBias = 127;                                       \
    int32_t kFloat32MantissaBits = 23;                                        \
    int32_t kFloat32ExponentBits = 8;                                         \
    auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
    Label done;                                                               \
    __ mfc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits);      \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                  \
              Operand(kFloat32ExponentBias + kFloat32MantissaBits));          \
    __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));        \
    __ mfc1(at, i.OutputDoubleRegister());                                    \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));       \
    __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister());           \
    __ bind(ool->exit());                                                     \
    __ bind(&done);                                                           \
  }

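// Atomic loads and stores are built from plain accesses plus sync barriers:
// a load followed by sync for acquire-style loads, and a store bracketed by
// sync on both sides for stores.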
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
  do {                                                   \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    __ sync();                                           \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
  do {                                                         \
    __ sync();                                                 \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    __ sync();                                                 \
  } while (0)

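// The kIeee754* opcodes call out to C library routines: set up a manual
// frame, move the double argument(s) into the FP parameter registers, call
// the runtime function, and fetch the result from the FP return register.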
#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 2);                                                    \
    /* Move the result into the double result register. */                     \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 1);                                                    \
    /* Move the result into the double result register. */                     \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)

void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(sp, fp);
  __ Pop(ra, fp);
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Branch(&done, ne, scratch1,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ lw(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}

namespace {

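// Adjusts sp so that the first unused stack slot sits just above it before a
// tail call: grow the stack when more slots are needed, and optionally shrink
// it when fewer are, keeping the frame access state in sync either way.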
void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    masm->Subu(sp, sp, stack_slot_delta * kPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    masm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot);
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Call(at);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Jump(at);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      __ Jump(i.InputRegister(0));
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }

      __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(at);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }
      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                       i.TempRegister(0), i.TempRegister(1),
                                       i.TempRegister(2));
      __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(at);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchDebugBreak:
      __ stop("kArchDebugBreak");
      break;
    case kArchComment: {
      Address comment_string = i.InputExternalReference(0).address();
      __ RecordComment(reinterpret_cast<const char*>(comment_string));
      break;
    }
    case kArchNop:
    case kArchThrowTerminator:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      CodeGenResult result = AssembleDeoptimizerCall(
          deopt_state_id, bailout_type, current_source_position_);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ lw(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
                                                   scratch0, scratch1, mode);
      __ Addu(at, object, index);
      __ sw(value, MemOperand(at));
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      __ Addu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
              Operand(offset.offset()));
      break;
    }
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Pow: {
      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
      __ CallStub(&stub);
      break;
    }
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kMipsAdd:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsAddOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMipsSub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsSubOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMipsMul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsMulOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMipsMulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsMulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
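    // Note: the division results below appear to be defined same-as-first
    // input by the instruction selector (an assumption, not visible in this
    // file), so the selnez/Movz following each division rewrites the
    // quotient, forcing the result to zero when the divisor is zero.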
    case kMipsDiv:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (IsMipsArchVariant(kMips32r6)) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMipsDivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (IsMipsArchVariant(kMips32r6)) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMipsMod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsAnd:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsOr:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsNor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK(i.InputOperand(1).immediate() == 0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMipsXor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsClz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMipsCtz: {
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      Label skip_for_zero;
      Label end;
      // Branch if the operand is zero.
      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
      // Isolate the lowest set bit (x & -x) and count its leading zeros.
      __ Subu(reg2, zero_reg, i.InputRegister(0));
      __ And(reg2, reg2, i.InputRegister(0));
      __ clz(reg2, reg2);
      // The trailing-zero count is 31 minus that leading-zero count.
      __ li(reg1, 0x1F);
      __ Subu(i.OutputRegister(), reg1, reg2);
      __ Branch(&end);
      __ bind(&skip_for_zero);
      // If the operand is zero, return the word length as the result.
      __ li(i.OutputRegister(), 0x20);
      __ bind(&end);
    } break;
    case kMipsPopcnt: {
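      // Parallel (divide-and-conquer) bit count: sum adjacent bit pairs,
      // then nibbles, bytes, and halfwords, masking with the constants
      // below at each step.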
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      uint32_t m1 = 0x55555555;
      uint32_t m2 = 0x33333333;
      uint32_t m4 = 0x0f0f0f0f;
      uint32_t m8 = 0x00ff00ff;
      uint32_t m16 = 0x0000ffff;

      // Put count of ones in every 2 bits into those 2 bits.
      __ li(at, m1);
      __ srl(reg1, i.InputRegister(0), 1);
      __ And(reg2, i.InputRegister(0), at);
      __ And(reg1, reg1, at);
      __ addu(reg1, reg1, reg2);

      // Put count of ones in every 4 bits into those 4 bits.
      __ li(at, m2);
      __ srl(reg2, reg1, 2);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ addu(reg1, reg1, reg2);

      // Put count of ones in every 8 bits into those 8 bits.
      __ li(at, m4);
      __ srl(reg2, reg1, 4);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ addu(reg1, reg1, reg2);

      // Put count of ones in every 16 bits into those 16 bits.
      __ li(at, m8);
      __ srl(reg2, reg1, 8);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ addu(reg1, reg1, reg2);

      // Calculate total number of ones.
      __ li(at, m16);
      __ srl(reg2, reg1, 16);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ addu(i.OutputRegister(), reg1, reg2);
    } break;
    case kMipsShl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsShr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsSar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsShlPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsRegister()) {
        __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), i.InputRegister(2));
      } else {
        uint32_t imm = i.InputOperand(2).immediate();
        __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), imm);
      }
    } break;
    case kMipsShrPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsRegister()) {
        __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), i.InputRegister(2));
      } else {
        uint32_t imm = i.InputOperand(2).immediate();
        __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), imm);
      }
    } break;
    case kMipsSarPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsRegister()) {
        __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), i.InputRegister(2));
      } else {
        uint32_t imm = i.InputOperand(2).immediate();
        __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), imm);
      }
    } break;
    case kMipsExt:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMipsIns:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
    case kMipsRor:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsTst:
      // Pseudo-instruction used for tst/branch. No opcode emitted here.
      break;
    case kMipsCmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMipsMov:
      // TODO(plind): Should we combine mov/li like this, or use separate
      // instr? - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType.
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kMipsLsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputInt8(2));
      break;
    case kMipsCmpS:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMipsAddS:
      // TODO(plind): add special case: combine mult & add.
      __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsSubS:
      __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMulS:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsDivS:
      __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsModS: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputSingleRegister());
      break;
    }
    case kMipsAbsS:
      __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMipsSqrtS: {
      __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsMaxS:
      __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMinS:
      __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsCmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMipsAddPair:
      __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
                 i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
      break;
    case kMipsSubPair:
      __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
                 i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
      break;
    case kMipsMulPair: {
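      // 64-bit multiply from 32-bit halves: Mulu produces the full
      // low x low product (its high word in OutputRegister(1)), then the two
      // cross products low x high and high x low are added into the high
      // word; their carries cannot affect the low word.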
      __ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
              i.InputRegister(2));
      __ mul(kScratchReg, i.InputRegister(0), i.InputRegister(3));
      __ mul(kScratchReg2, i.InputRegister(1), i.InputRegister(2));
      __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg);
      __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg2);
    } break;
    case kMipsAddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsSubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMaddS:
      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
                i.InputFloatRegister(1), i.InputFloatRegister(2));
      break;
    case kMipsMaddD:
      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
      break;
    case kMipsMaddfS:
      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
                 i.InputFloatRegister(2));
      break;
    case kMipsMaddfD:
      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(2));
      break;
    case kMipsMsubS:
      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
                i.InputFloatRegister(1), i.InputFloatRegister(2));
      break;
    case kMipsMsubD:
      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
      break;
    case kMipsMsubfS:
      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
                 i.InputFloatRegister(2));
      break;
    case kMipsMsubfD:
      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(2));
      break;
    case kMipsMulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsDivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMipsAbsD:
      __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsNegS:
      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMipsNegD:
      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsSqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsMaxD:
      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMinD:
      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsFloat64RoundDown: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
      break;
    }
    case kMipsFloat32RoundDown: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
      break;
    }
    case kMipsFloat64RoundTruncate: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
      break;
    }
    case kMipsFloat32RoundTruncate: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
      break;
    }
    case kMipsFloat64RoundUp: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
      break;
    }
    case kMipsFloat32RoundUp: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
      break;
    }
    case kMipsFloat64RoundTiesEven: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
      break;
    }
    case kMipsFloat32RoundTiesEven: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
      break;
    }
    case kMipsFloat32Max: {
      Label compare_nan, done_compare;
      __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
                       i.InputSingleRegister(1), &compare_nan);
      __ Branch(&done_compare);
      __ bind(&compare_nan);
      __ Move(i.OutputSingleRegister(),
              std::numeric_limits<float>::quiet_NaN());
      __ bind(&done_compare);
      break;
    }
    case kMipsFloat64Max: {
      Label compare_nan, done_compare;
      __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                       i.InputDoubleRegister(1), &compare_nan);
      __ Branch(&done_compare);
      __ bind(&compare_nan);
      __ Move(i.OutputDoubleRegister(),
              std::numeric_limits<double>::quiet_NaN());
      __ bind(&done_compare);
      break;
    }
    case kMipsFloat32Min: {
      Label compare_nan, done_compare;
      __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
                       i.InputSingleRegister(1), &compare_nan);
      __ Branch(&done_compare);
      __ bind(&compare_nan);
      __ Move(i.OutputSingleRegister(),
              std::numeric_limits<float>::quiet_NaN());
      __ bind(&done_compare);
      break;
    }
    case kMipsFloat64Min: {
      Label compare_nan, done_compare;
      __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                       i.InputDoubleRegister(1), &compare_nan);
      __ Branch(&done_compare);
      __ bind(&compare_nan);
      __ Move(i.OutputDoubleRegister(),
              std::numeric_limits<double>::quiet_NaN());
      __ bind(&done_compare);
      break;
    }
    case kMipsCvtSD: {
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsCvtDS: {
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMipsCvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMipsCvtSW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_s_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMipsCvtSUw: {
      FPURegister scratch = kScratchDoubleReg;
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
      __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    }
    case kMipsCvtDUw: {
      FPURegister scratch = kScratchDoubleReg;
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
      break;
    }
    case kMipsFloorWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsCeilWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsRoundWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsTruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsFloorWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsCeilWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsRoundWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsTruncWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ trunc_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
      // because INT32_MIN allows easier out-of-bounds detection.
      __ addiu(kScratchReg, i.OutputRegister(), 1);
      __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
      __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
      break;
    }
    case kMipsTruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      break;
    }
    case kMipsTruncUwS: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
      __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
      // because 0 allows easier out-of-bounds detection.
      __ addiu(kScratchReg, i.OutputRegister(), 1);
      __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
      break;
    }
    case kMipsFloat64ExtractLowWord32:
      __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsFloat64ExtractHighWord32:
      __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsFloat64InsertLowWord32:
      __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMipsFloat64InsertHighWord32:
      __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMipsFloat64SilenceNaN:
      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;

    // ... more basic instructions ...
    case kMipsSeb:
      __ Seb(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMipsSeh:
      __ Seh(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMipsLbu:
      __ lbu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsLb:
      __ lb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsSb:
      __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMipsLhu:
      __ lhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsUlhu:
      __ Ulhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsLh:
      __ lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsUlh:
      __ Ulh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsSh:
      __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMipsUsh:
      __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
      break;
    case kMipsLw:
      __ lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsUlw:
      __ Ulw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsSw:
      __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMipsUsw:
      __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMipsLwc1: {
      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    }
    case kMipsUlwc1: {
      __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    }
    case kMipsSwc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ swc1(ft, operand);
      break;
    }
    case kMipsUswc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Uswc1(ft, operand, kScratchReg);
      break;
    }
    case kMipsLdc1:
      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMipsUldc1:
      __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    case kMipsSdc1: {
      FPURegister ft = i.InputOrZeroDoubleRegister(2);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ sdc1(ft, i.MemoryOperand());
      break;
    }
    case kMipsUsdc1: {
      FPURegister ft = i.InputOrZeroDoubleRegister(2);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
      break;
    }
    case kMipsPush:
      if (instr->InputAt(0)->IsFPRegister()) {
        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
        __ Subu(sp, sp, Operand(kDoubleSize));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kMipsStackClaim: {
      __ Subu(sp, sp, Operand(i.InputInt32(0)));
      frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
      break;
    }
    case kMipsStoreToStackSlot: {
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
        } else {
          DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
          __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
        }
      } else {
        __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
      }
      break;
    }
    case kMipsByteSwap32: {
      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
      break;
    case kCheckedLoadWord64:
    case kCheckedStoreWord64:
      UNREACHABLE();  // Currently unsupported checked int64 load/store.
      break;
    case kAtomicLoadInt8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
      break;
    case kAtomicLoadUint8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
      break;
    case kAtomicLoadInt16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
      break;
    case kAtomicLoadUint16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
      break;
    case kAtomicLoadWord32:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
      break;
    case kAtomicStoreWord8:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
      break;
    case kAtomicStoreWord16:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
      break;
    case kAtomicStoreWord32:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
      break;
  }
  return kSuccess;
}  // NOLINT(readability/fn_size)


#define UNSUPPORTED_COND(opcode, condition)                                  \
  OFStream out(stdout);                                                      \
  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
  UNIMPLEMENTED();

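// Maps a FlagsCondition to the MIPS condition used by the FP branch macros
// (BranchF32/BranchF64); returns false for conditions those macros cannot
// express, so callers can report them as unsupported.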
convertCondition(FlagsCondition condition,Condition & cc)1605 static bool convertCondition(FlagsCondition condition, Condition& cc) {
1606 switch (condition) {
1607 case kEqual:
1608 cc = eq;
1609 return true;
1610 case kNotEqual:
1611 cc = ne;
1612 return true;
1613 case kUnsignedLessThan:
1614 cc = lt;
1615 return true;
1616 case kUnsignedGreaterThanOrEqual:
1617 cc = uge;
1618 return true;
1619 case kUnsignedLessThanOrEqual:
1620 cc = le;
1621 return true;
1622 case kUnsignedGreaterThan:
1623 cc = ugt;
1624 return true;
1625 default:
1626 break;
1627 }
1628 return false;
1629 }
1630
1631
1632 // Assembles branches after an instruction.
AssembleArchBranch(Instruction * instr,BranchInfo * branch)1633 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
1634 MipsOperandConverter i(this, instr);
1635 Label* tlabel = branch->true_label;
1636 Label* flabel = branch->false_label;
1637 Condition cc = kNoCondition;
1638 // MIPS does not have condition code flags, so compare and branch are
1639 // implemented differently than on the other arch's. The compare operations
1640 // emit mips pseudo-instructions, which are handled here by branch
1641 // instructions that do the actual comparison. Essential that the input
1642 // registers to compare pseudo-op are not modified before this branch op, as
1643 // they are tested here.
1644
  if (instr->arch_opcode() == kMipsTst) {
    cc = FlagsConditionToConditionTst(branch->condition);
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(tlabel, cc, at, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMipsAddOvf) {
    switch (branch->condition) {
      case kOverflow:
        __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMipsSubOvf) {
    switch (branch->condition) {
      case kOverflow:
        __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMipsSubOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMipsMulOvf) {
    switch (branch->condition) {
      case kOverflow:
        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMipsMulOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMipsCmp) {
    cc = FlagsConditionToConditionCmp(branch->condition);
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
  } else if (instr->arch_opcode() == kMipsCmpS) {
    if (!convertCondition(branch->condition, cc)) {
      UNSUPPORTED_COND(kMipsCmpS, branch->condition);
    }
    FPURegister left = i.InputOrZeroSingleRegister(0);
    FPURegister right = i.InputOrZeroSingleRegister(1);
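    // An immediate-zero operand compares against kDoubleRegZero; 0.0 is
    // materialized into it lazily, only once per code object.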
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF32(tlabel, nullptr, cc, left, right);
  } else if (instr->arch_opcode() == kMipsCmpD) {
    if (!convertCondition(branch->condition, cc)) {
      UNSUPPORTED_COND(kMipsCmpD, branch->condition);
    }
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF64(tlabel, nullptr, cc, left, right);
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
  if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit mips pseudo-instructions, which are checked and handled
  // here.

  if (instr->arch_opcode() == kMipsTst) {
    cc = FlagsConditionToConditionTst(condition);
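    // When testing against an immediate power-of-two mask, the tested bit can
    // be extracted directly with Ext instead of an And + Sltu sequence.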
    if (instr->InputAt(1)->IsImmediate() &&
        base::bits::IsPowerOfTwo32(i.InputOperand(1).immediate())) {
      uint16_t pos =
          base::bits::CountTrailingZeros32(i.InputOperand(1).immediate());
      __ Ext(result, i.InputRegister(0), pos, 1);
    } else {
      __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
      __ Sltu(result, zero_reg, kScratchReg);
    }
    if (cc == eq) {
      // Sltu produces 0 for equality, invert the result.
      __ xori(result, result, 1);
    }
    return;
  } else if (instr->arch_opcode() == kMipsAddOvf ||
             instr->arch_opcode() == kMipsSubOvf ||
             instr->arch_opcode() == kMipsMulOvf) {
    Label flabel, tlabel;
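    // The *BranchNoOvf helpers branch to flabel when no overflow occurs, so
    // the fall-through path materializes 1 (overflow) and the branch target
    // materializes 0.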
    switch (instr->arch_opcode()) {
      case kMipsAddOvf:
        __ AddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), &flabel);
        break;
      case kMipsSubOvf:
        __ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), &flabel);
        break;
      case kMipsMulOvf:
        __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), &flabel);
        break;
      default:
        UNREACHABLE();
        break;
    }
    __ li(result, 1);
    __ Branch(&tlabel);
    __ bind(&flabel);
    __ li(result, 0);
    __ bind(&tlabel);
  } else if (instr->arch_opcode() == kMipsCmp) {
    cc = FlagsConditionToConditionCmp(condition);
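    // Materialize the boolean with Slt/Sltu: gt/le and hi/ls swap the
    // operands, and eq, ge, le, hs and ls invert the 0/1 result with xori.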
    switch (cc) {
      case eq:
      case ne: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        Register select;
        if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
          // Pass left operand if right is zero.
          select = left;
        } else {
          __ Subu(kScratchReg, left, right);
          select = kScratchReg;
        }
        __ Sltu(result, zero_reg, select);
        if (cc == eq) {
          // Sltu produces 0 for equality, invert the result.
          __ xori(result, result, 1);
        }
      } break;
      case lt:
      case ge: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Slt(result, left, right);
        if (cc == ge) {
          __ xori(result, result, 1);
        }
      } break;
      case gt:
      case le: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Slt(result, left, right);
        if (cc == le) {
          __ xori(result, result, 1);
        }
      } break;
      case lo:
      case hs: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Sltu(result, left, right);
        if (cc == hs) {
          __ xori(result, result, 1);
        }
      } break;
      case hi:
      case ls: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Sltu(result, left, right);
        if (cc == ls) {
          __ xori(result, result, 1);
        }
      } break;
      default:
        UNREACHABLE();
    }
    return;
  } else if (instr->arch_opcode() == kMipsCmpD ||
             instr->arch_opcode() == kMipsCmpS) {
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    bool predicate;
    FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
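    // Pre-r6 cores set the FPU condition flag with c.cond.fmt and select the
    // result via Movf/Movt; r6 uses cmp.cond.fmt, which writes an
    // all-ones/all-zeros mask into an FPU register that is then moved to the
    // GP result and masked down to its LSB.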
    if (!IsMipsArchVariant(kMips32r6)) {
      __ li(result, Operand(1));
      if (instr->arch_opcode() == kMipsCmpD) {
        __ c(cc, D, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMipsCmpS);
        __ c(cc, S, left, right);
      }
      if (predicate) {
        __ Movf(result, zero_reg);
      } else {
        __ Movt(result, zero_reg);
      }
    } else {
      if (instr->arch_opcode() == kMipsCmpD) {
        __ cmp(cc, L, kDoubleCompareReg, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMipsCmpS);
        __ cmp(cc, W, kDoubleCompareReg, left, right);
      }
      __ mfc1(result, kDoubleCompareReg);
      __ andi(result, result, 1);  // Cmp returns all 1's/0's, use only LSB.
      if (!predicate)  // Toggle result for not equal.
        __ xori(result, result, 1);
    }
    return;
  } else {
    PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    TRACE_UNIMPL();
    UNIMPLEMENTED();
  }
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
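  // Inputs are laid out as (value, default target, case0 value, case0 target,
  // case1 value, case1 target, ...); emit a linear chain of compares.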
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ li(at, Operand(i.InputInt32(index + 0)));
    __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
  }
  __ nop();  // Branch delay slot of the last beq.
  AssembleArchJump(i.InputRpo(1));
}


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
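  // Bounds-check the input against the case count (unsigned, so negative
  // inputs also take the default target), then jump through an inline switch
  // table.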
  __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
    return GetLabel(i.InputRpo(index + 2));
  });
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
    SourcePosition pos) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
  DeoptimizeReason deoptimization_reason =
      GetDeoptimizationReason(deoptimization_id);
  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
  return kSuccess;
}

void CodeGenerator::FinishFrame(Frame* frame) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    frame->AlignSavedCalleeRegisterSlots();
  }

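  // On mips32 each callee-saved double occupies two pointer-sized slots
  // (kDoubleSize / kPointerSize == 2).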
  if (saves_fpu != 0) {
    int count = base::bits::CountPopulation32(saves_fpu);
    DCHECK(kNumCalleeSavedFPU == count);
    frame->AllocateSavedCalleeRegisterSlots(count *
                                            (kDoubleSize / kPointerSize));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    int count = base::bits::CountPopulation32(saves);
    DCHECK(kNumCalleeSaved == count + 1);
    frame->AllocateSavedCalleeRegisterSlots(count);
  }
}

void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      __ Push(ra, fp);
      __ mov(fp, sp);
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue());
      if (descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
    }
  }

  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
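  // shrink_slots is the number of stack slots needed beyond the fixed frame
  // laid out by the prologue above; for OSR entry, the slots already present
  // in the unoptimized frame are subtracted below.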

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the
    // unoptimized frame is still on the stack. Optimized code uses OSR values
    // directly from the unoptimized frame. Thus, all that needs to be done is
    // to allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (shrink_slots > 0) {
    __ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
  }

  // Save callee-saved FPU registers.
  if (saves_fpu != 0) {
    __ MultiPushFPU(saves_fpu);
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // Save callee-saved registers.
    __ MultiPush(saves);
    DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
  }
}

void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(descriptor->StackParameterCount());
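  // pop_count covers the fixed stack parameters; 'pop' may add a further
  // constant or register-supplied number of slots to drop.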

  // Restore GP registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore FPU registers.
  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    __ MultiPopFPU(saves_fpu);
  }

  MipsOperandConverter g(this, nullptr);
  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a
    // variable number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ Branch(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    Register pop_reg = g.ToRegister(pop);
    __ sll(pop_reg, pop_reg, kPointerSizeLog2);
    __ Addu(sp, sp, Operand(pop_reg));
  }
  if (pop_count != 0) {
    __ DropAndRet(pop_count);
  } else {
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ sw(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ lw(temp, src);
      __ sw(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
            __ li(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ li(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kFloat32:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat64:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ li(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ li(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips.
          break;
      }
      if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsFPStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
          __ sw(zero_reg, dst);
        } else {
          __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
          __ sw(at, dst);
        }
      } else {
        DCHECK(destination->IsFPRegister());
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64());
      if (destination->IsFPStackSlot()) {
        __ sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MachineRepresentation rep =
          LocationOperand::cast(source)->representation();
      if (rep == MachineRepresentation::kFloat64) {
        __ sdc1(src, g.ToMemOperand(destination));
      } else if (rep == MachineRepresentation::kFloat32) {
        __ swc1(src, g.ToMemOperand(destination));
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        UNREACHABLE();
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (destination->IsFPRegister()) {
      if (rep == MachineRepresentation::kFloat64) {
        __ ldc1(g.ToDoubleRegister(destination), src);
      } else if (rep == MachineRepresentation::kFloat32) {
        __ lwc1(g.ToDoubleRegister(destination), src);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        UNREACHABLE();
      }
    } else {
      FPURegister temp = kScratchDoubleReg;
      if (rep == MachineRepresentation::kFloat64) {
        __ ldc1(temp, src);
        __ sdc1(temp, g.ToMemOperand(destination));
      } else if (rep == MachineRepresentation::kFloat32) {
        __ lwc1(temp, src);
        __ swc1(temp, g.ToMemOperand(destination));
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        UNREACHABLE();
      }
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ lw(src, dst);
      __ sw(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kCompareReg;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lw(temp_0, src);
    __ lw(temp_1, dst);
    __ sw(temp_0, dst);
    __ sw(temp_1, src);
  } else if (source->IsFPRegister()) {
    FPURegister temp = kScratchDoubleReg;
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      MachineRepresentation rep =
          LocationOperand::cast(source)->representation();
      if (rep == MachineRepresentation::kFloat64) {
        __ Move(temp, src);
        __ ldc1(src, dst);
        __ sdc1(temp, dst);
      } else if (rep == MachineRepresentation::kFloat32) {
        __ Move(temp, src);
        __ lwc1(src, dst);
        __ swc1(temp, dst);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        UNREACHABLE();
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand dst0 = g.ToMemOperand(destination);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
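    // Swap via one FPU temp and one GP temp: load the destination value into
    // the FPU temp, copy the source to the destination word by word through
    // the GP temp, then store the FPU temp back to the source slot.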
    if (rep == MachineRepresentation::kFloat64) {
      MemOperand src1(src0.rm(), src0.offset() + kIntSize);
      MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
      __ ldc1(temp_1, dst0);  // Save destination in temp_1.
      __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
      __ sw(temp_0, dst0);
      __ lw(temp_0, src1);
      __ sw(temp_0, dst1);
      __ sdc1(temp_1, src0);
    } else if (rep == MachineRepresentation::kFloat32) {
      __ lwc1(temp_1, dst0);  // Save destination in temp_1.
      __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
      __ sw(temp_0, dst0);
      __ swc1(temp_1, src0);
    } else {
      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
      UNREACHABLE();
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 32-bit MIPS we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block trampoline pool emission for the duration of the padding.
    v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
        masm());
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= v8::internal::Assembler::kInstrSize;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8