// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->
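// The "__" shorthand above follows the usual V8 convention: every
// "__ op(...)" line in this file expands to masm()->op(...) on the
// underlying MacroAssembler.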


// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kCompareReg kLithiumScratchReg2
#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble


// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg)                                                      \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL()                                                       \
  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
         __LINE__)


// Adds MIPS-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // The single- and double-precision register namespaces are the same on
    // MIPS: both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputDoubleRegister(index);
  }

  DoubleRegister InputOrZeroSingleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputSingleRegister(index);
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //              Maybe not done on arm due to const pool ??
        break;
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
    return Operand(zero_reg);
  }

  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        AllocatedOperand::cast(op)->index());
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
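
// A sketch of how the converter is used (see AssembleArchInstruction below):
//
//   MipsOperandConverter i(this, instr);
//   __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
//
// InputOperand() folds the register/immediate dispatch into a single Operand.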


static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {

class OutOfLineLoadSingle final : public OutOfLineCode {
 public:
  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  FloatRegister const result_;
};


class OutOfLineLoadDouble final : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ mov(result_, zero_reg); }

 private:
  Register const result_;
};


class OutOfLineRound : public OutOfLineCode {
 public:
  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle the rounding-to-zero case where the sign has to be preserved.
    // The high bits of the double input are already in kScratchReg.
    __ srl(at, kScratchReg, 31);
    __ sll(at, at, 31);
    __ Mthc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineRound32 : public OutOfLineCode {
 public:
  OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle the rounding-to-zero case where the sign has to be preserved.
    // The high bits of the float input are already in kScratchReg.
    __ srl(at, kScratchReg, 31);
    __ sll(at, at, 31);
    __ mtc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    if (mode_ > RecordWriteMode::kValueIsMap) {
      __ CheckPageFlag(value_, scratch0_,
                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
                       exit());
    }
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    // TODO(turbofan): Once we get frame elision working, we need to save
    // and restore ra properly here if the frame was elided.
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         EMIT_REMEMBERED_SET, save_fp_mode);
    __ Addu(scratch1_, object_, index_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};


Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


Condition FlagsConditionToConditionTst(FlagsCondition condition) {
  switch (condition) {
    case kNotEqual:
      return ne;
    case kEqual:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

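// Maps a FlagsCondition to an FPU compare condition plus a 'predicate' flag:
// the branch should be taken when the FPU condition holds if predicate is
// true, and when it fails if predicate is false. MIPS has no direct FPU
// "not equal" compare, so e.g. kNotEqual becomes "compare EQ, branch on
// false".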
FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
                                             FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      predicate = true;
      return EQ;
    case kNotEqual:
      predicate = false;
      return EQ;
    case kUnsignedLessThan:
      predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      predicate = false;
      return ULT;
    case kUnsignedLessThanOrEqual:
      predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      predicate = false;
      return ULE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      predicate = true;
      break;
    default:
      predicate = true;
      break;
  }
  UNREACHABLE();
  return kNoFPUCondition;
}

}  // namespace

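// Checked loads bounds-check the offset against the length operand before
// loading, branching to out-of-line code that substitutes a quiet NaN
// (float loads) or zero (integer loads) when the access is out of bounds.
// The register variant folds the address add into the branch delay slot.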
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
  do {                                                                        \
    auto result = i.Output##width##Register();                                \
    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ addu(kScratchReg, i.InputRegister(2), offset);                       \
      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
  do {                                                                        \
    auto result = i.OutputRegister();                                         \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ addu(kScratchReg, i.InputRegister(2), offset);                       \
      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)

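// Checked stores mirror the checked loads above, but on an out-of-bounds
// offset they simply skip the store rather than branching out of line.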
#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ addu(kScratchReg, i.InputRegister(3), offset);                \
      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.InputRegister(2);                                 \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ addu(kScratchReg, i.InputRegister(3), offset);                \
      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.InputRegister(2);                                 \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

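// Rounding helpers. On r6 the rounding mode is set in FCSR and a single rint
// instruction does the work. Pre-r6, the input's exponent is checked first:
// values too large to have a fractional part are passed through unchanged,
// and the out-of-line path restores the sign bit when the rounded result is
// zero (to preserve -0).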
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode)                                  \
  if (IsMipsArchVariant(kMips32r6)) {                                         \
    __ cfc1(kScratchReg, FCSR);                                                \
    __ li(at, Operand(mode_##mode));                                           \
    __ ctc1(at, FCSR);                                                         \
    __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    __ ctc1(kScratchReg, FCSR);                                                \
  } else {                                                                     \
    auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister());    \
    Label done;                                                                \
    __ Mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
           HeapNumber::kExponentBits);                                         \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
    __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));         \
    __ Move(at, kScratchReg2, i.OutputDoubleRegister());                       \
    __ or_(at, at, kScratchReg2);                                              \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
    __ bind(ool->exit());                                                      \
    __ bind(&done);                                                            \
  }


#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode)                                   \
  if (IsMipsArchVariant(kMips32r6)) {                                         \
    __ cfc1(kScratchReg, FCSR);                                               \
    __ li(at, Operand(mode_##mode));                                          \
    __ ctc1(at, FCSR);                                                        \
    __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));            \
    __ ctc1(kScratchReg, FCSR);                                               \
  } else {                                                                    \
    int32_t kFloat32ExponentBias = 127;                                       \
    int32_t kFloat32MantissaBits = 23;                                        \
    int32_t kFloat32ExponentBits = 8;                                         \
    auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
    Label done;                                                               \
    __ mfc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits);      \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                  \
              Operand(kFloat32ExponentBias + kFloat32MantissaBits));          \
    __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));        \
    __ mfc1(at, i.OutputDoubleRegister());                                    \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));       \
    __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister());           \
    __ bind(ool->exit());                                                     \
    __ bind(&done);                                                           \
  }

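// The next two helpers implement the stack bookkeeping around tail calls:
// the first pops stack arguments left by the caller once they are no longer
// needed, the second grows the stack for extra arguments and tears down the
// current frame, keeping frame_access_state() consistent throughout.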
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ addiu(sp, sp, sp_slot_delta * kPointerSize);
  }
  frame_access_state()->SetFrameAccessToDefault();
}


void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame()->needs_frame()) {
    __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();

  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Call(at);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Jump(at);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }

      __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(at);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }

      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(at);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchLazyBailout: {
      EnsureSpaceForLazyDeopt();
      RecordCallPosition(instr);
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
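      // Write-barrier pattern: perform the store, then test the object's
      // page flags; only when pointers written from this page are interesting
      // to the GC do we take the out-of-line path, which (depending on the
      // mode) skips Smi values and calls the RecordWriteStub.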
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
                                                   scratch0, scratch1, mode);
      __ Addu(at, object, index);
      __ sw(value, MemOperand(at));
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMipsAdd:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsAddOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMipsSub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsSubOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMipsMul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsMulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsMulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsDiv:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (IsMipsArchVariant(kMips32r6)) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMipsDivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (IsMipsArchVariant(kMips32r6)) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMipsMod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsAnd:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsOr:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsNor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK(i.InputOperand(1).immediate() == 0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMipsXor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsClz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMipsShl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsShr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsSar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsExt:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMipsIns:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
    case kMipsRor:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsTst:
      // Pseudo-instruction used for tst/branch. No opcode emitted here.
      break;
    case kMipsCmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMipsMov:
      // TODO(plind): Should we combine mov/li like this, or use separate instr?
      //              - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

    case kMipsCmpS:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMipsAddS:
      // TODO(plind): add special case: combine mult & add.
      __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsSubS:
      __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMulS:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsDivS:
      __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsModS: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputSingleRegister());
      break;
    }
    case kMipsAbsS:
      __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMipsSqrtS: {
      __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsMaxS:
      __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMinS:
      __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsCmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMipsAddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsSubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsDivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMipsAbsD:
      __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsSqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsMaxD:
      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMinD:
      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsFloat64RoundDown: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
      break;
    }
    case kMipsFloat32RoundDown: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
      break;
    }
    case kMipsFloat64RoundTruncate: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
      break;
    }
    case kMipsFloat32RoundTruncate: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
      break;
    }
    case kMipsFloat64RoundUp: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
      break;
    }
    case kMipsFloat32RoundUp: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
      break;
    }
    case kMipsFloat64RoundTiesEven: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
      break;
    }
    case kMipsFloat32RoundTiesEven: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
      break;
    }
    case kMipsFloat64Max: {
      // (b < a) ? a : b
      if (IsMipsArchVariant(kMips32r6)) {
        __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
        __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
      } else {
        __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        // Left operand is result, passthrough if false.
        __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    }
    case kMipsFloat64Min: {
      // (a < b) ? a : b
      if (IsMipsArchVariant(kMips32r6)) {
        __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 i.InputDoubleRegister(1));
        __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
      } else {
        __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
        // Right operand is result, passthrough if false.
        __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    }
    case kMipsFloat32Max: {
      // (b < a) ? a : b
      if (IsMipsArchVariant(kMips32r6)) {
        __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
        __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
      } else {
        __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        // Left operand is result, passthrough if false.
        __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    }
    case kMipsFloat32Min: {
      // (a < b) ? a : b
      if (IsMipsArchVariant(kMips32r6)) {
        __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 i.InputDoubleRegister(1));
        __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
                 i.InputDoubleRegister(0));
      } else {
        __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
        // Right operand is result, passthrough if false.
        __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    }
    case kMipsCvtSD: {
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsCvtDS: {
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMipsCvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMipsCvtSW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_s_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMipsCvtDUw: {
      FPURegister scratch = kScratchDoubleReg;
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
      break;
    }
    case kMipsFloorWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsCeilWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsRoundWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsTruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsFloorWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsCeilWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsRoundWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsTruncWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ trunc_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsTruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      break;
    }
    case kMipsFloat64ExtractLowWord32:
      __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsFloat64ExtractHighWord32:
      __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsFloat64InsertLowWord32:
      __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMipsFloat64InsertHighWord32:
      __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    // ... more basic instructions ...

    case kMipsLbu:
      __ lbu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsLb:
      __ lb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsSb:
      __ sb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMipsLhu:
      __ lhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsLh:
      __ lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsSh:
      __ sh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMipsLw:
      __ lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsSw:
      __ sw(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMipsLwc1: {
      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    }
    case kMipsSwc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      __ swc1(i.InputSingleRegister(index), operand);
      break;
    }
    case kMipsLdc1:
      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMipsSdc1:
      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
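    // The stack-manipulation opcodes below adjust sp directly and record the
    // change in frame_access_state() so later slot addressing stays correct.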
    case kMipsPush:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
        __ Subu(sp, sp, Operand(kDoubleSize));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kMipsStackClaim: {
      __ Subu(sp, sp, Operand(i.InputInt32(0)));
      frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
      break;
    }
    case kMipsStoreToStackSlot: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
      } else {
        __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
      }
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
      break;
    case kCheckedLoadWord64:
    case kCheckedStoreWord64:
      UNREACHABLE();  // Checked int64 loads/stores are currently unsupported.
      break;
  }
}  // NOLINT(readability/fn_size)


#define UNSUPPORTED_COND(opcode, condition)                                  \
  OFStream out(stdout);                                                      \
  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
  UNIMPLEMENTED();

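// Partial mapping from FlagsCondition to the MacroAssembler conditions used
// by the BranchF32/BranchF64 paths below; returns false when the condition
// has no direct encoding so the caller can report it as unsupported.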
static bool convertCondition(FlagsCondition condition, Condition& cc) {
  switch (condition) {
    case kEqual:
      cc = eq;
      return true;
    case kNotEqual:
      cc = ne;
      return true;
    case kUnsignedLessThan:
      cc = lt;
      return true;
    case kUnsignedGreaterThanOrEqual:
      cc = uge;
      return true;
    case kUnsignedLessThanOrEqual:
      cc = le;
      return true;
    case kUnsignedGreaterThan:
      cc = ugt;
      return true;
    default:
      break;
  }
  return false;
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  MipsOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are handled here by
  // branch instructions that do the actual comparison. It is essential that
  // the input registers of the compare pseudo-op are not modified before this
  // branch op, as they are tested here.

  if (instr->arch_opcode() == kMipsTst) {
    cc = FlagsConditionToConditionTst(branch->condition);
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(tlabel, cc, at, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMipsAddOvf) {
    switch (branch->condition) {
      case kOverflow:
        __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMipsSubOvf) {
    switch (branch->condition) {
      case kOverflow:
        __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMipsSubOvf, branch->condition);
        break;
    }
  } else if (instr->arch_opcode() == kMipsCmp) {
    cc = FlagsConditionToConditionCmp(branch->condition);
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
  } else if (instr->arch_opcode() == kMipsCmpS) {
    if (!convertCondition(branch->condition, cc)) {
      UNSUPPORTED_COND(kMipsCmpS, branch->condition);
    }
    FPURegister left = i.InputOrZeroSingleRegister(0);
    FPURegister right = i.InputOrZeroSingleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF32(tlabel, nullptr, cc, left, right);
  } else if (instr->arch_opcode() == kMipsCmpD) {
    if (!convertCondition(branch->condition, cc)) {
      UNSUPPORTED_COND(kMipsCmpD, branch->condition);
    }
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF64(tlabel, nullptr, cc, left, right);
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
  if (!branch->fallthru) __ Branch(flabel);  // No fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label false_value;
  DCHECK_NE(0u, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are checked and handled
  // here.

  if (instr->arch_opcode() == kMipsTst) {
    cc = FlagsConditionToConditionTst(condition);
    __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
    __ Sltu(result, zero_reg, kScratchReg);
    if (cc == eq) {
      // Sltu produces 0 for equality, invert the result.
      __ xori(result, result, 1);
    }
    return;
  } else if (instr->arch_opcode() == kMipsAddOvf ||
             instr->arch_opcode() == kMipsSubOvf) {
    Label flabel, tlabel;
    switch (instr->arch_opcode()) {
      case kMipsAddOvf:
        __ AddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), &flabel);
        break;
      case kMipsSubOvf:
        __ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), &flabel);
        break;
      default:
        UNREACHABLE();
        break;
    }
    __ li(result, 1);
    __ Branch(&tlabel);
    __ bind(&flabel);
    __ li(result, 0);
    __ bind(&tlabel);
  } else if (instr->arch_opcode() == kMipsCmp) {
    cc = FlagsConditionToConditionCmp(condition);
    switch (cc) {
      case eq:
      case ne: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        Register select;
        if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
          // Pass left operand if right is zero.
          select = left;
        } else {
          __ Subu(kScratchReg, left, right);
          select = kScratchReg;
        }
        __ Sltu(result, zero_reg, select);
        if (cc == eq) {
          // Sltu produces 0 for equality, invert the result.
          __ xori(result, result, 1);
        }
      } break;
      case lt:
      case ge: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Slt(result, left, right);
        if (cc == ge) {
          __ xori(result, result, 1);
        }
      } break;
      case gt:
      case le: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Slt(result, left, right);
        if (cc == le) {
          __ xori(result, result, 1);
        }
      } break;
      case lo:
      case hs: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Sltu(result, left, right);
        if (cc == hs) {
          __ xori(result, result, 1);
        }
      } break;
      case hi:
      case ls: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Sltu(result, left, right);
        if (cc == ls) {
          __ xori(result, result, 1);
        }
      } break;
      default:
        UNREACHABLE();
    }
    return;
  } else if (instr->arch_opcode() == kMipsCmpD ||
             instr->arch_opcode() == kMipsCmpS) {
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    bool predicate;
    FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
    if (!IsMipsArchVariant(kMips32r6)) {
      __ li(result, Operand(1));
      if (instr->arch_opcode() == kMipsCmpD) {
        __ c(cc, D, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMipsCmpS);
        __ c(cc, S, left, right);
      }
      if (predicate) {
        __ Movf(result, zero_reg);
      } else {
        __ Movt(result, zero_reg);
      }
    } else {
      if (instr->arch_opcode() == kMipsCmpD) {
        __ cmp(cc, L, kDoubleCompareReg, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMipsCmpS);
        __ cmp(cc, W, kDoubleCompareReg, left, right);
      }
      __ mfc1(result, kDoubleCompareReg);
      __ andi(result, result, 1);  // Cmp returns all 1's/0's, use only LSB.
      if (!predicate)  // Toggle result for not equal.
        __ xori(result, result, 1);
    }
    return;
  } else {
    PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    TRACE_UNIMPL();
    UNIMPLEMENTED();
  }
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ li(at, Operand(i.InputInt32(index + 0)));
    __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
  }
  __ nop();  // Branch delay slot of the last beq.
  AssembleArchJump(i.InputRpo(1));
}

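// Emits an inline jump table: bal loads the address of the following code
// into ra while the delay slot scales the case index by 4; the scaled index
// plus ra then selects an entry from the table of label addresses (the dd
// entries) laid out right after the jr and its delay-slot nop.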
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  Label here;
  __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
  __ BlockTrampolinePoolFor(case_count + 6);
  __ bal(&here);
  __ sll(at, input, 2);  // Branch delay slot.
  __ bind(&here);
  __ addu(at, at, ra);
  __ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
  __ jr(at);
  __ nop();  // Branch delay slot nop.
  for (size_t index = 0; index < case_count; ++index) {
    __ dd(GetLabel(i.InputRpo(index + 2)));
  }
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

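// Frame construction varies with the incoming call descriptor: C calls build
// a plain ra/fp frame, JS calls emit the standard (possibly pre-aged)
// prologue, stubs use StubPrologue, and any remaining frames are elided.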
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (descriptor->IsCFunctionCall()) {
    __ Push(ra, fp);
    __ mov(fp, sp);
  } else if (descriptor->IsJSFunctionCall()) {
    __ Prologue(this->info()->GeneratePreagedPrologue());
  } else if (frame()->needs_frame()) {
    __ StubPrologue();
  } else {
    frame()->SetElidedFrameSizeInSlots(0);
  }
  frame_access_state()->SetFrameAccessToDefault();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the
    // unoptimized frame is still on the stack. Optimized code uses OSR values
    // directly from the unoptimized frame. Thus, all that needs to be done is
    // to allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
  }
  if (stack_shrink_slots > 0) {
    __ Subu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
  }

  // Save callee-saved FPU registers.
  if (saves_fpu != 0) {
    __ MultiPushFPU(saves_fpu);
    int count = base::bits::CountPopulation32(saves_fpu);
    DCHECK(kNumCalleeSavedFPU == count);
    frame()->AllocateSavedCalleeRegisterSlots(count *
                                              (kDoubleSize / kPointerSize));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // Save callee-saved registers.
    __ MultiPush(saves);
    // kNumCalleeSaved includes the fp register, but the fp register
    // is saved separately in TF.
    int count = base::bits::CountPopulation32(saves);
    DCHECK(kNumCalleeSaved == count + 1);
    frame()->AllocateSavedCalleeRegisterSlots(count);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(descriptor->StackParameterCount());

  // Restore GP registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore FPU registers.
  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    __ MultiPopFPU(saves_fpu);
  }

  if (descriptor->IsCFunctionCall()) {
    __ mov(sp, fp);
    __ Pop(ra, fp);
  } else if (frame()->needs_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ Branch(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      __ mov(sp, fp);
      __ Pop(ra, fp);
    }
  }
  if (pop_count != 0) {
    __ DropAndRet(pop_count);
  } else {
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ sw(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ lw(temp, src);
      __ sw(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ li(dst, Operand(src.ToInt32()));
          break;
        case Constant::kFloat32:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat64:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ li(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          int offset;
          if (IsMaterializableFromFrame(src_object, &offset)) {
            __ lw(dst, MemOperand(fp, offset));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ li(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips.
          break;
      }
      if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
        __ sw(at, dst);
      } else {
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64());
      if (destination->IsDoubleStackSlot()) {
        __ sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ sdc1(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(g.ToDoubleRegister(destination), src);
    } else {
      FPURegister temp = kScratchDoubleReg;
      __ ldc1(temp, src);
      __ sdc1(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}

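// Swaps route through the scratch registers. A pair of double stack slots is
// swapped by parking one operand in the FPU scratch register while the GP
// scratch register copies the other side word by word.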
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ lw(src, dst);
      __ sw(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kCompareReg;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lw(temp_0, src);
    __ lw(temp_1, dst);
    __ sw(temp_0, dst);
    __ sw(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    FPURegister temp = kScratchDoubleReg;
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ ldc1(src, dst);
      __ sdc1(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rm(), src0.offset() + kIntSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
    __ sw(temp_0, dst0);
    __ lw(temp_0, src1);
    __ sw(temp_0, dst1);
    __ sdc1(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 32-bit MIPS we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // Unused on 32-bit MIPS. Still exists on 64-bit ports.
  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block trampoline pool emission for the duration of the padding.
    v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
        masm());
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= v8::internal::Assembler::kInstrSize;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8