1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/crankshaft/mips64/lithium-codegen-mips64.h"
6 
7 #include "src/code-factory.h"
8 #include "src/code-stubs.h"
9 #include "src/crankshaft/hydrogen-osr.h"
10 #include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
11 #include "src/ic/ic.h"
12 #include "src/ic/stub-cache.h"
13 
14 namespace v8 {
15 namespace internal {
16 
17 
18 class SafepointGenerator final : public CallWrapper {
19  public:
SafepointGenerator(LCodeGen * codegen,LPointerMap * pointers,Safepoint::DeoptMode mode)20   SafepointGenerator(LCodeGen* codegen,
21                      LPointerMap* pointers,
22                      Safepoint::DeoptMode mode)
23       : codegen_(codegen),
24         pointers_(pointers),
25         deopt_mode_(mode) { }
~SafepointGenerator()26   virtual ~SafepointGenerator() {}
27 
BeforeCall(int call_size) const28   void BeforeCall(int call_size) const override {}
29 
AfterCall() const30   void AfterCall() const override {
31     codegen_->RecordSafepoint(pointers_, deopt_mode_);
32   }
33 
34  private:
35   LCodeGen* codegen_;
36   LPointerMap* pointers_;
37   Safepoint::DeoptMode deopt_mode_;
38 };
39 
PushSafepointRegistersScope(LCodeGen * codegen)40 LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
41     LCodeGen* codegen)
42     : codegen_(codegen) {
43   DCHECK(codegen_->info()->is_calling());
44   DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
45   codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
46 
47   StoreRegistersStateStub stub(codegen_->isolate());
48   codegen_->masm_->push(ra);
49   codegen_->masm_->CallStub(&stub);
50 }
51 
~PushSafepointRegistersScope()52 LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
53   DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
54   RestoreRegistersStateStub stub(codegen_->isolate());
55   codegen_->masm_->push(ra);
56   codegen_->masm_->CallStub(&stub);
57   codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
58 }
59 
60 #define __ masm()->
61 
GenerateCode()62 bool LCodeGen::GenerateCode() {
63   LPhase phase("Z_Code generation", chunk());
64   DCHECK(is_unused());
65   status_ = GENERATING;
66 
67   // Open a frame scope to indicate that there is a frame on the stack.  The
68   // NONE indicates that the scope shouldn't actually generate code to set up
69   // the frame (that is done in GeneratePrologue).
70   FrameScope frame_scope(masm_, StackFrame::NONE);
71 
72   return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
73          GenerateJumpTable() && GenerateSafepointTable();
74 }
75 
76 
FinishCode(Handle<Code> code)77 void LCodeGen::FinishCode(Handle<Code> code) {
78   DCHECK(is_done());
79   code->set_stack_slots(GetTotalFrameSlotCount());
80   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
81   PopulateDeoptimizationData(code);
82 }
83 
84 
SaveCallerDoubles()85 void LCodeGen::SaveCallerDoubles() {
86   DCHECK(info()->saves_caller_doubles());
87   DCHECK(NeedsEagerFrame());
88   Comment(";;; Save clobbered callee double registers");
89   int count = 0;
90   BitVector* doubles = chunk()->allocated_double_registers();
91   BitVector::Iterator save_iterator(doubles);
92   while (!save_iterator.Done()) {
93     __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
94             MemOperand(sp, count * kDoubleSize));
95     save_iterator.Advance();
96     count++;
97   }
98 }
99 
100 
RestoreCallerDoubles()101 void LCodeGen::RestoreCallerDoubles() {
102   DCHECK(info()->saves_caller_doubles());
103   DCHECK(NeedsEagerFrame());
104   Comment(";;; Restore clobbered callee double registers");
105   BitVector* doubles = chunk()->allocated_double_registers();
106   BitVector::Iterator save_iterator(doubles);
107   int count = 0;
108   while (!save_iterator.Done()) {
109     __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
110             MemOperand(sp, count * kDoubleSize));
111     save_iterator.Advance();
112     count++;
113   }
114 }
115 
116 
GeneratePrologue()117 bool LCodeGen::GeneratePrologue() {
118   DCHECK(is_generating());
119 
120   if (info()->IsOptimizing()) {
121     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
122 
123     // a1: Callee's JS function.
124     // cp: Callee's context.
125     // fp: Caller's frame pointer.
126     // lr: Caller's pc.
127   }
128 
129   info()->set_prologue_offset(masm_->pc_offset());
130   if (NeedsEagerFrame()) {
131     if (info()->IsStub()) {
132       __ StubPrologue(StackFrame::STUB);
133     } else {
134       __ Prologue(info()->GeneratePreagedPrologue());
135     }
136     frame_is_built_ = true;
137   }
138 
139   // Reserve space for the stack slots needed by the code.
140   int slots = GetStackSlotCount();
141   if (slots > 0) {
142     if (FLAG_debug_code) {
143       __ Dsubu(sp,  sp, Operand(slots * kPointerSize));
144       __ Push(a0, a1);
145       __ Daddu(a0, sp, Operand(slots *  kPointerSize));
146       __ li(a1, Operand(kSlotsZapValue));
147       Label loop;
148       __ bind(&loop);
149       __ Dsubu(a0, a0, Operand(kPointerSize));
150       __ sd(a1, MemOperand(a0, 2 * kPointerSize));
151       __ Branch(&loop, ne, a0, Operand(sp));
152       __ Pop(a0, a1);
153     } else {
154       __ Dsubu(sp, sp, Operand(slots * kPointerSize));
155     }
156   }
157 
158   if (info()->saves_caller_doubles()) {
159     SaveCallerDoubles();
160   }
161   return !is_aborted();
162 }
163 
164 
DoPrologue(LPrologue * instr)165 void LCodeGen::DoPrologue(LPrologue* instr) {
166   Comment(";;; Prologue begin");
167 
168   // Possibly allocate a local context.
169   if (info()->scope()->NeedsContext()) {
170     Comment(";;; Allocate local context");
171     bool need_write_barrier = true;
172     // Argument to NewContext is the function, which is in a1.
173     int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
174     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
175     if (info()->scope()->is_script_scope()) {
176       __ push(a1);
177       __ Push(info()->scope()->scope_info());
178       __ CallRuntime(Runtime::kNewScriptContext);
179       deopt_mode = Safepoint::kLazyDeopt;
180     } else {
181       if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
182         FastNewFunctionContextStub stub(isolate());
183         __ li(FastNewFunctionContextDescriptor::SlotsRegister(),
184               Operand(slots));
185         __ CallStub(&stub);
186         // Result of FastNewFunctionContextStub is always in new space.
187         need_write_barrier = false;
188       } else {
189         __ push(a1);
190         __ CallRuntime(Runtime::kNewFunctionContext);
191       }
192     }
193     RecordSafepoint(deopt_mode);
194 
195     // Context is returned in both v0. It replaces the context passed to us.
196     // It's saved in the stack and kept live in cp.
197     __ mov(cp, v0);
198     __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
199     // Copy any necessary parameters into the context.
200     int num_parameters = info()->scope()->num_parameters();
201     int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
202     for (int i = first_parameter; i < num_parameters; i++) {
203       Variable* var = (i == -1) ? info()->scope()->receiver()
204                                 : info()->scope()->parameter(i);
205       if (var->IsContextSlot()) {
206         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
207             (num_parameters - 1 - i) * kPointerSize;
208         // Load parameter from stack.
209         __ ld(a0, MemOperand(fp, parameter_offset));
210         // Store it in the context.
211         MemOperand target = ContextMemOperand(cp, var->index());
212         __ sd(a0, target);
213         // Update the write barrier. This clobbers a3 and a0.
214         if (need_write_barrier) {
215           __ RecordWriteContextSlot(
216               cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
217         } else if (FLAG_debug_code) {
218           Label done;
219           __ JumpIfInNewSpace(cp, a0, &done);
220           __ Abort(kExpectedNewSpaceObject);
221           __ bind(&done);
222         }
223       }
224     }
225     Comment(";;; End allocate local context");
226   }
227 
228   Comment(";;; Prologue end");
229 }
230 
231 
GenerateOsrPrologue()232 void LCodeGen::GenerateOsrPrologue() {
233   // Generate the OSR entry prologue at the first unknown OSR value, or if there
234   // are none, at the OSR entrypoint instruction.
235   if (osr_pc_offset_ >= 0) return;
236 
237   osr_pc_offset_ = masm()->pc_offset();
238 
239   // Adjust the frame size, subsuming the unoptimized frame into the
240   // optimized frame.
241   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
242   DCHECK(slots >= 0);
243   __ Dsubu(sp, sp, Operand(slots * kPointerSize));
244 }
245 
246 
GenerateBodyInstructionPre(LInstruction * instr)247 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
248   if (instr->IsCall()) {
249     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
250   }
251   if (!instr->IsLazyBailout() && !instr->IsGap()) {
252     safepoints_.BumpLastLazySafepointIndex();
253   }
254 }
255 
256 
GenerateDeferredCode()257 bool LCodeGen::GenerateDeferredCode() {
258   DCHECK(is_generating());
259   if (deferred_.length() > 0) {
260     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
261       LDeferredCode* code = deferred_[i];
262 
263       HValue* value =
264           instructions_->at(code->instruction_index())->hydrogen_value();
265       RecordAndWritePosition(value->position());
266 
267       Comment(";;; <@%d,#%d> "
268               "-------------------- Deferred %s --------------------",
269               code->instruction_index(),
270               code->instr()->hydrogen_value()->id(),
271               code->instr()->Mnemonic());
272       __ bind(code->entry());
273       if (NeedsDeferredFrame()) {
274         Comment(";;; Build frame");
275         DCHECK(!frame_is_built_);
276         DCHECK(info()->IsStub());
277         frame_is_built_ = true;
278         __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
279         __ PushCommonFrame(scratch0());
280         Comment(";;; Deferred code");
281       }
282       code->Generate();
283       if (NeedsDeferredFrame()) {
284         Comment(";;; Destroy frame");
285         DCHECK(frame_is_built_);
286         __ PopCommonFrame(scratch0());
287         frame_is_built_ = false;
288       }
289       __ jmp(code->exit());
290     }
291   }
292   // Deferred code is the last part of the instruction sequence. Mark
293   // the generated code as done unless we bailed out.
294   if (!is_aborted()) status_ = DONE;
295   return !is_aborted();
296 }
297 
298 
GenerateJumpTable()299 bool LCodeGen::GenerateJumpTable() {
300   if (jump_table_.length() > 0) {
301     Comment(";;; -------------------- Jump table --------------------");
302     Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
303     Label table_start, call_deopt_entry;
304 
305     __ bind(&table_start);
306     Label needs_frame;
307     Address base = jump_table_[0]->address;
308     for (int i = 0; i < jump_table_.length(); i++) {
309       Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
310       __ bind(&table_entry->label);
311       Address entry = table_entry->address;
312       DeoptComment(table_entry->deopt_info);
313 
314       // Second-level deopt table entries are contiguous and small, so instead
315       // of loading the full, absolute address of each one, load the base
316       // address and add an immediate offset.
317       if (is_int16(entry - base)) {
318         if (table_entry->needs_frame) {
319           DCHECK(!info()->saves_caller_doubles());
320           Comment(";;; call deopt with frame");
321           __ PushCommonFrame();
322           __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
323           __ li(t9, Operand(entry - base));
324         } else {
325           __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT);
326           __ li(t9, Operand(entry - base));
327         }
328 
329       } else {
330         __ li(t9, Operand(entry - base));
331         if (table_entry->needs_frame) {
332           DCHECK(!info()->saves_caller_doubles());
333           Comment(";;; call deopt with frame");
334           __ PushCommonFrame();
335           __ BranchAndLink(&needs_frame);
336         } else {
337           __ BranchAndLink(&call_deopt_entry);
338         }
339       }
340     }
341     if (needs_frame.is_linked()) {
342       __ bind(&needs_frame);
343       // This variant of deopt can only be used with stubs. Since we don't
344       // have a function pointer to install in the stack frame that we're
345       // building, install a special marker there instead.
346       __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
347       __ push(at);
348       DCHECK(info()->IsStub());
349     }
350 
351     Comment(";;; call deopt");
352     __ bind(&call_deopt_entry);
353 
354     if (info()->saves_caller_doubles()) {
355       DCHECK(info()->IsStub());
356       RestoreCallerDoubles();
357     }
358 
359     __ li(at,
360           Operand(reinterpret_cast<int64_t>(base), RelocInfo::RUNTIME_ENTRY));
361     __ Daddu(t9, t9, Operand(at));
362     __ Jump(t9);
363   }
364   // The deoptimization jump table is the last part of the instruction
365   // sequence. Mark the generated code as done unless we bailed out.
366   if (!is_aborted()) status_ = DONE;
367   return !is_aborted();
368 }
369 
370 
GenerateSafepointTable()371 bool LCodeGen::GenerateSafepointTable() {
372   DCHECK(is_done());
373   safepoints_.Emit(masm(), GetTotalFrameSlotCount());
374   return !is_aborted();
375 }
376 
377 
ToRegister(int index) const378 Register LCodeGen::ToRegister(int index) const {
379   return Register::from_code(index);
380 }
381 
382 
ToDoubleRegister(int index) const383 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
384   return DoubleRegister::from_code(index);
385 }
386 
387 
ToRegister(LOperand * op) const388 Register LCodeGen::ToRegister(LOperand* op) const {
389   DCHECK(op->IsRegister());
390   return ToRegister(op->index());
391 }
392 
393 
EmitLoadRegister(LOperand * op,Register scratch)394 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
395   if (op->IsRegister()) {
396     return ToRegister(op->index());
397   } else if (op->IsConstantOperand()) {
398     LConstantOperand* const_op = LConstantOperand::cast(op);
399     HConstant* constant = chunk_->LookupConstant(const_op);
400     Handle<Object> literal = constant->handle(isolate());
401     Representation r = chunk_->LookupLiteralRepresentation(const_op);
402     if (r.IsInteger32()) {
403       AllowDeferredHandleDereference get_number;
404       DCHECK(literal->IsNumber());
405       __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
406     } else if (r.IsSmi()) {
407       DCHECK(constant->HasSmiValue());
408       __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
409     } else if (r.IsDouble()) {
410       Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
411     } else {
412       DCHECK(r.IsSmiOrTagged());
413       __ li(scratch, literal);
414     }
415     return scratch;
416   } else if (op->IsStackSlot()) {
417     __ ld(scratch, ToMemOperand(op));
418     return scratch;
419   }
420   UNREACHABLE();
421   return scratch;
422 }
423 
424 
ToDoubleRegister(LOperand * op) const425 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
426   DCHECK(op->IsDoubleRegister());
427   return ToDoubleRegister(op->index());
428 }
429 
430 
EmitLoadDoubleRegister(LOperand * op,FloatRegister flt_scratch,DoubleRegister dbl_scratch)431 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
432                                                 FloatRegister flt_scratch,
433                                                 DoubleRegister dbl_scratch) {
434   if (op->IsDoubleRegister()) {
435     return ToDoubleRegister(op->index());
436   } else if (op->IsConstantOperand()) {
437     LConstantOperand* const_op = LConstantOperand::cast(op);
438     HConstant* constant = chunk_->LookupConstant(const_op);
439     Handle<Object> literal = constant->handle(isolate());
440     Representation r = chunk_->LookupLiteralRepresentation(const_op);
441     if (r.IsInteger32()) {
442       DCHECK(literal->IsNumber());
443       __ li(at, Operand(static_cast<int32_t>(literal->Number())));
444       __ mtc1(at, flt_scratch);
445       __ cvt_d_w(dbl_scratch, flt_scratch);
446       return dbl_scratch;
447     } else if (r.IsDouble()) {
448       Abort(kUnsupportedDoubleImmediate);
449     } else if (r.IsTagged()) {
450       Abort(kUnsupportedTaggedImmediate);
451     }
452   } else if (op->IsStackSlot()) {
453     MemOperand mem_op = ToMemOperand(op);
454     __ ldc1(dbl_scratch, mem_op);
455     return dbl_scratch;
456   }
457   UNREACHABLE();
458   return dbl_scratch;
459 }
460 
461 
ToHandle(LConstantOperand * op) const462 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
463   HConstant* constant = chunk_->LookupConstant(op);
464   DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
465   return constant->handle(isolate());
466 }
467 
468 
IsInteger32(LConstantOperand * op) const469 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
470   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
471 }
472 
473 
IsSmi(LConstantOperand * op) const474 bool LCodeGen::IsSmi(LConstantOperand* op) const {
475   return chunk_->LookupLiteralRepresentation(op).IsSmi();
476 }
477 
478 
ToInteger32(LConstantOperand * op) const479 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
480   // return ToRepresentation(op, Representation::Integer32());
481   HConstant* constant = chunk_->LookupConstant(op);
482   return constant->Integer32Value();
483 }
484 
485 
ToRepresentation_donotuse(LConstantOperand * op,const Representation & r) const486 int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
487                                             const Representation& r) const {
488   HConstant* constant = chunk_->LookupConstant(op);
489   int32_t value = constant->Integer32Value();
490   if (r.IsInteger32()) return value;
491   DCHECK(r.IsSmiOrTagged());
492   return reinterpret_cast<int64_t>(Smi::FromInt(value));
493 }
494 
495 
ToSmi(LConstantOperand * op) const496 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
497   HConstant* constant = chunk_->LookupConstant(op);
498   return Smi::FromInt(constant->Integer32Value());
499 }
500 
501 
ToDouble(LConstantOperand * op) const502 double LCodeGen::ToDouble(LConstantOperand* op) const {
503   HConstant* constant = chunk_->LookupConstant(op);
504   DCHECK(constant->HasDoubleValue());
505   return constant->DoubleValue();
506 }
507 
508 
ToOperand(LOperand * op)509 Operand LCodeGen::ToOperand(LOperand* op) {
510   if (op->IsConstantOperand()) {
511     LConstantOperand* const_op = LConstantOperand::cast(op);
512     HConstant* constant = chunk()->LookupConstant(const_op);
513     Representation r = chunk_->LookupLiteralRepresentation(const_op);
514     if (r.IsSmi()) {
515       DCHECK(constant->HasSmiValue());
516       return Operand(Smi::FromInt(constant->Integer32Value()));
517     } else if (r.IsInteger32()) {
518       DCHECK(constant->HasInteger32Value());
519       return Operand(constant->Integer32Value());
520     } else if (r.IsDouble()) {
521       Abort(kToOperandUnsupportedDoubleImmediate);
522     }
523     DCHECK(r.IsTagged());
524     return Operand(constant->handle(isolate()));
525   } else if (op->IsRegister()) {
526     return Operand(ToRegister(op));
527   } else if (op->IsDoubleRegister()) {
528     Abort(kToOperandIsDoubleRegisterUnimplemented);
529     return Operand((int64_t)0);
530   }
531   // Stack slots not implemented, use ToMemOperand instead.
532   UNREACHABLE();
533   return Operand((int64_t)0);
534 }
535 
536 
ArgumentsOffsetWithoutFrame(int index)537 static int ArgumentsOffsetWithoutFrame(int index) {
538   DCHECK(index < 0);
539   return -(index + 1) * kPointerSize;
540 }
541 
542 
ToMemOperand(LOperand * op) const543 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
544   DCHECK(!op->IsRegister());
545   DCHECK(!op->IsDoubleRegister());
546   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
547   if (NeedsEagerFrame()) {
548     return MemOperand(fp, FrameSlotToFPOffset(op->index()));
549   } else {
550     // Retrieve parameter without eager stack-frame relative to the
551     // stack-pointer.
552     return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
553   }
554 }
555 
556 
ToHighMemOperand(LOperand * op) const557 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
558   DCHECK(op->IsDoubleStackSlot());
559   if (NeedsEagerFrame()) {
560     // return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
561     return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kIntSize);
562   } else {
563     // Retrieve parameter without eager stack-frame relative to the
564     // stack-pointer.
565     // return MemOperand(
566     //    sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
567     return MemOperand(
568         sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
569   }
570 }
571 
572 
WriteTranslation(LEnvironment * environment,Translation * translation)573 void LCodeGen::WriteTranslation(LEnvironment* environment,
574                                 Translation* translation) {
575   if (environment == NULL) return;
576 
577   // The translation includes one command per value in the environment.
578   int translation_size = environment->translation_size();
579 
580   WriteTranslation(environment->outer(), translation);
581   WriteTranslationFrame(environment, translation);
582 
583   int object_index = 0;
584   int dematerialized_index = 0;
585   for (int i = 0; i < translation_size; ++i) {
586     LOperand* value = environment->values()->at(i);
587     AddToTranslation(
588         environment, translation, value, environment->HasTaggedValueAt(i),
589         environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
590   }
591 }
592 
593 
AddToTranslation(LEnvironment * environment,Translation * translation,LOperand * op,bool is_tagged,bool is_uint32,int * object_index_pointer,int * dematerialized_index_pointer)594 void LCodeGen::AddToTranslation(LEnvironment* environment,
595                                 Translation* translation,
596                                 LOperand* op,
597                                 bool is_tagged,
598                                 bool is_uint32,
599                                 int* object_index_pointer,
600                                 int* dematerialized_index_pointer) {
601   if (op == LEnvironment::materialization_marker()) {
602     int object_index = (*object_index_pointer)++;
603     if (environment->ObjectIsDuplicateAt(object_index)) {
604       int dupe_of = environment->ObjectDuplicateOfAt(object_index);
605       translation->DuplicateObject(dupe_of);
606       return;
607     }
608     int object_length = environment->ObjectLengthAt(object_index);
609     if (environment->ObjectIsArgumentsAt(object_index)) {
610       translation->BeginArgumentsObject(object_length);
611     } else {
612       translation->BeginCapturedObject(object_length);
613     }
614     int dematerialized_index = *dematerialized_index_pointer;
615     int env_offset = environment->translation_size() + dematerialized_index;
616     *dematerialized_index_pointer += object_length;
617     for (int i = 0; i < object_length; ++i) {
618       LOperand* value = environment->values()->at(env_offset + i);
619       AddToTranslation(environment,
620                        translation,
621                        value,
622                        environment->HasTaggedValueAt(env_offset + i),
623                        environment->HasUint32ValueAt(env_offset + i),
624                        object_index_pointer,
625                        dematerialized_index_pointer);
626     }
627     return;
628   }
629 
630   if (op->IsStackSlot()) {
631     int index = op->index();
632     if (is_tagged) {
633       translation->StoreStackSlot(index);
634     } else if (is_uint32) {
635       translation->StoreUint32StackSlot(index);
636     } else {
637       translation->StoreInt32StackSlot(index);
638     }
639   } else if (op->IsDoubleStackSlot()) {
640     int index = op->index();
641     translation->StoreDoubleStackSlot(index);
642   } else if (op->IsRegister()) {
643     Register reg = ToRegister(op);
644     if (is_tagged) {
645       translation->StoreRegister(reg);
646     } else if (is_uint32) {
647       translation->StoreUint32Register(reg);
648     } else {
649       translation->StoreInt32Register(reg);
650     }
651   } else if (op->IsDoubleRegister()) {
652     DoubleRegister reg = ToDoubleRegister(op);
653     translation->StoreDoubleRegister(reg);
654   } else if (op->IsConstantOperand()) {
655     HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
656     int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
657     translation->StoreLiteral(src_index);
658   } else {
659     UNREACHABLE();
660   }
661 }
662 
663 
CallCode(Handle<Code> code,RelocInfo::Mode mode,LInstruction * instr)664 void LCodeGen::CallCode(Handle<Code> code,
665                         RelocInfo::Mode mode,
666                         LInstruction* instr) {
667   CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
668 }
669 
670 
CallCodeGeneric(Handle<Code> code,RelocInfo::Mode mode,LInstruction * instr,SafepointMode safepoint_mode)671 void LCodeGen::CallCodeGeneric(Handle<Code> code,
672                                RelocInfo::Mode mode,
673                                LInstruction* instr,
674                                SafepointMode safepoint_mode) {
675   DCHECK(instr != NULL);
676   __ Call(code, mode);
677   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
678 }
679 
680 
CallRuntime(const Runtime::Function * function,int num_arguments,LInstruction * instr,SaveFPRegsMode save_doubles)681 void LCodeGen::CallRuntime(const Runtime::Function* function,
682                            int num_arguments,
683                            LInstruction* instr,
684                            SaveFPRegsMode save_doubles) {
685   DCHECK(instr != NULL);
686 
687   __ CallRuntime(function, num_arguments, save_doubles);
688 
689   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
690 }
691 
692 
LoadContextFromDeferred(LOperand * context)693 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
694   if (context->IsRegister()) {
695     __ Move(cp, ToRegister(context));
696   } else if (context->IsStackSlot()) {
697     __ ld(cp, ToMemOperand(context));
698   } else if (context->IsConstantOperand()) {
699     HConstant* constant =
700         chunk_->LookupConstant(LConstantOperand::cast(context));
701     __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
702   } else {
703     UNREACHABLE();
704   }
705 }
706 
707 
CallRuntimeFromDeferred(Runtime::FunctionId id,int argc,LInstruction * instr,LOperand * context)708 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
709                                        int argc,
710                                        LInstruction* instr,
711                                        LOperand* context) {
712   LoadContextFromDeferred(context);
713   __ CallRuntimeSaveDoubles(id);
714   RecordSafepointWithRegisters(
715       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
716 }
717 
718 
RegisterEnvironmentForDeoptimization(LEnvironment * environment,Safepoint::DeoptMode mode)719 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
720                                                     Safepoint::DeoptMode mode) {
721   environment->set_has_been_used();
722   if (!environment->HasBeenRegistered()) {
723     // Physical stack frame layout:
724     // -x ............. -4  0 ..................................... y
725     // [incoming arguments] [spill slots] [pushed outgoing arguments]
726 
727     // Layout of the environment:
728     // 0 ..................................................... size-1
729     // [parameters] [locals] [expression stack including arguments]
730 
731     // Layout of the translation:
732     // 0 ........................................................ size - 1 + 4
733     // [expression stack including arguments] [locals] [4 words] [parameters]
734     // |>------------  translation_size ------------<|
735 
736     int frame_count = 0;
737     int jsframe_count = 0;
738     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
739       ++frame_count;
740       if (e->frame_type() == JS_FUNCTION) {
741         ++jsframe_count;
742       }
743     }
744     Translation translation(&translations_, frame_count, jsframe_count, zone());
745     WriteTranslation(environment, &translation);
746     int deoptimization_index = deoptimizations_.length();
747     int pc_offset = masm()->pc_offset();
748     environment->Register(deoptimization_index,
749                           translation.index(),
750                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
751     deoptimizations_.Add(environment, zone());
752   }
753 }
754 
DeoptimizeIf(Condition condition,LInstruction * instr,DeoptimizeReason deopt_reason,Deoptimizer::BailoutType bailout_type,Register src1,const Operand & src2)755 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
756                             DeoptimizeReason deopt_reason,
757                             Deoptimizer::BailoutType bailout_type,
758                             Register src1, const Operand& src2) {
759   LEnvironment* environment = instr->environment();
760   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
761   DCHECK(environment->HasBeenRegistered());
762   int id = environment->deoptimization_index();
763   Address entry =
764       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
765   if (entry == NULL) {
766     Abort(kBailoutWasNotPrepared);
767     return;
768   }
769 
770   if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
771     Register scratch = scratch0();
772     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
773     Label no_deopt;
774     __ Push(a1, scratch);
775     __ li(scratch, Operand(count));
776     __ lw(a1, MemOperand(scratch));
777     __ Subu(a1, a1, Operand(1));
778     __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
779     __ li(a1, Operand(FLAG_deopt_every_n_times));
780     __ sw(a1, MemOperand(scratch));
781     __ Pop(a1, scratch);
782 
783     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
784     __ bind(&no_deopt);
785     __ sw(a1, MemOperand(scratch));
786     __ Pop(a1, scratch);
787   }
788 
789   if (info()->ShouldTrapOnDeopt()) {
790     Label skip;
791     if (condition != al) {
792       __ Branch(&skip, NegateCondition(condition), src1, src2);
793     }
794     __ stop("trap_on_deopt");
795     __ bind(&skip);
796   }
797 
798   Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
799 
800   DCHECK(info()->IsStub() || frame_is_built_);
801   // Go through jump table if we need to handle condition, build frame, or
802   // restore caller doubles.
803   if (condition == al && frame_is_built_ &&
804       !info()->saves_caller_doubles()) {
805     DeoptComment(deopt_info);
806     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
807   } else {
808     Deoptimizer::JumpTableEntry* table_entry =
809         new (zone()) Deoptimizer::JumpTableEntry(
810             entry, deopt_info, bailout_type, !frame_is_built_);
811     // We often have several deopts to the same entry, reuse the last
812     // jump entry if this is the case.
813     if (FLAG_trace_deopt || isolate()->is_profiling() ||
814         jump_table_.is_empty() ||
815         !table_entry->IsEquivalentTo(*jump_table_.last())) {
816       jump_table_.Add(table_entry, zone());
817     }
818     __ Branch(&jump_table_.last()->label, condition, src1, src2);
819   }
820 }
821 
DeoptimizeIf(Condition condition,LInstruction * instr,DeoptimizeReason deopt_reason,Register src1,const Operand & src2)822 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
823                             DeoptimizeReason deopt_reason, Register src1,
824                             const Operand& src2) {
825   Deoptimizer::BailoutType bailout_type = info()->IsStub()
826       ? Deoptimizer::LAZY
827       : Deoptimizer::EAGER;
828   DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
829 }
830 
831 
RecordSafepointWithLazyDeopt(LInstruction * instr,SafepointMode safepoint_mode)832 void LCodeGen::RecordSafepointWithLazyDeopt(
833     LInstruction* instr, SafepointMode safepoint_mode) {
834   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
835     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
836   } else {
837     DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
838     RecordSafepointWithRegisters(
839         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
840   }
841 }
842 
843 
RecordSafepoint(LPointerMap * pointers,Safepoint::Kind kind,int arguments,Safepoint::DeoptMode deopt_mode)844 void LCodeGen::RecordSafepoint(
845     LPointerMap* pointers,
846     Safepoint::Kind kind,
847     int arguments,
848     Safepoint::DeoptMode deopt_mode) {
849   DCHECK(expected_safepoint_kind_ == kind);
850 
851   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
852   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
853       kind, arguments, deopt_mode);
854   for (int i = 0; i < operands->length(); i++) {
855     LOperand* pointer = operands->at(i);
856     if (pointer->IsStackSlot()) {
857       safepoint.DefinePointerSlot(pointer->index(), zone());
858     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
859       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
860     }
861   }
862 }
863 
864 
RecordSafepoint(LPointerMap * pointers,Safepoint::DeoptMode deopt_mode)865 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
866                                Safepoint::DeoptMode deopt_mode) {
867   RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
868 }
869 
870 
RecordSafepoint(Safepoint::DeoptMode deopt_mode)871 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
872   LPointerMap empty_pointers(zone());
873   RecordSafepoint(&empty_pointers, deopt_mode);
874 }
875 
876 
RecordSafepointWithRegisters(LPointerMap * pointers,int arguments,Safepoint::DeoptMode deopt_mode)877 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
878                                             int arguments,
879                                             Safepoint::DeoptMode deopt_mode) {
880   RecordSafepoint(
881       pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
882 }
883 
884 
LabelType(LLabel * label)885 static const char* LabelType(LLabel* label) {
886   if (label->is_loop_header()) return " (loop header)";
887   if (label->is_osr_entry()) return " (OSR entry)";
888   return "";
889 }
890 
891 
DoLabel(LLabel * label)892 void LCodeGen::DoLabel(LLabel* label) {
893   Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
894           current_instruction_,
895           label->hydrogen_value()->id(),
896           label->block_id(),
897           LabelType(label));
898   __ bind(label->label());
899   current_block_ = label->block_id();
900   DoGap(label);
901 }
902 
903 
DoParallelMove(LParallelMove * move)904 void LCodeGen::DoParallelMove(LParallelMove* move) {
905   resolver_.Resolve(move);
906 }
907 
908 
DoGap(LGap * gap)909 void LCodeGen::DoGap(LGap* gap) {
910   for (int i = LGap::FIRST_INNER_POSITION;
911        i <= LGap::LAST_INNER_POSITION;
912        i++) {
913     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
914     LParallelMove* move = gap->GetParallelMove(inner_pos);
915     if (move != NULL) DoParallelMove(move);
916   }
917 }
918 
919 
DoInstructionGap(LInstructionGap * instr)920 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
921   DoGap(instr);
922 }
923 
924 
DoParameter(LParameter * instr)925 void LCodeGen::DoParameter(LParameter* instr) {
926   // Nothing to do.
927 }
928 
929 
DoUnknownOSRValue(LUnknownOSRValue * instr)930 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
931   GenerateOsrPrologue();
932 }
933 
934 
DoModByPowerOf2I(LModByPowerOf2I * instr)935 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
936   Register dividend = ToRegister(instr->dividend());
937   int32_t divisor = instr->divisor();
938   DCHECK(dividend.is(ToRegister(instr->result())));
939 
940   // Theoretically, a variation of the branch-free code for integer division by
941   // a power of 2 (calculating the remainder via an additional multiplication
942   // (which gets simplified to an 'and') and subtraction) should be faster, and
943   // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
944   // indicate that positive dividends are heavily favored, so the branching
945   // version performs better.
946   HMod* hmod = instr->hydrogen();
947   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
948   Label dividend_is_not_negative, done;
949 
950   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
951     __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
952     // Note: The code below even works when right contains kMinInt.
953     __ dsubu(dividend, zero_reg, dividend);
954     __ And(dividend, dividend, Operand(mask));
955     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
956       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
957                    Operand(zero_reg));
958     }
959     __ Branch(USE_DELAY_SLOT, &done);
960     __ dsubu(dividend, zero_reg, dividend);
961   }
962 
963   __ bind(&dividend_is_not_negative);
964   __ And(dividend, dividend, Operand(mask));
965   __ bind(&done);
966 }
967 
968 
DoModByConstI(LModByConstI * instr)969 void LCodeGen::DoModByConstI(LModByConstI* instr) {
970   Register dividend = ToRegister(instr->dividend());
971   int32_t divisor = instr->divisor();
972   Register result = ToRegister(instr->result());
973   DCHECK(!dividend.is(result));
974 
975   if (divisor == 0) {
976     DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
977     return;
978   }
979 
980   __ TruncatingDiv(result, dividend, Abs(divisor));
981   __ Dmul(result, result, Operand(Abs(divisor)));
982   __ Dsubu(result, dividend, Operand(result));
983 
984   // Check for negative zero.
985   HMod* hmod = instr->hydrogen();
986   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
987     Label remainder_not_zero;
988     __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
989     DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
990                  Operand(zero_reg));
991     __ bind(&remainder_not_zero);
992   }
993 }
994 
995 
DoModI(LModI * instr)996 void LCodeGen::DoModI(LModI* instr) {
997   HMod* hmod = instr->hydrogen();
998   const Register left_reg = ToRegister(instr->left());
999   const Register right_reg = ToRegister(instr->right());
1000   const Register result_reg = ToRegister(instr->result());
1001 
1002   // div runs in the background while we check for special cases.
1003   __ Dmod(result_reg, left_reg, right_reg);
1004 
1005   Label done;
1006   // Check for x % 0, we have to deopt in this case because we can't return a
1007   // NaN.
1008   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1009     DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
1010                  Operand(zero_reg));
1011   }
1012 
1013   // Check for kMinInt % -1, div will return kMinInt, which is not what we
1014   // want. We have to deopt if we care about -0, because we can't return that.
1015   if (hmod->CheckFlag(HValue::kCanOverflow)) {
1016     Label no_overflow_possible;
1017     __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
1018     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1019       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
1020                    Operand(-1));
1021     } else {
1022       __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
1023       __ Branch(USE_DELAY_SLOT, &done);
1024       __ mov(result_reg, zero_reg);
1025     }
1026     __ bind(&no_overflow_possible);
1027   }
1028 
1029   // If we care about -0, test if the dividend is <0 and the result is 0.
1030   __ Branch(&done, ge, left_reg, Operand(zero_reg));
1031 
1032   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1033     DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
1034                  Operand(zero_reg));
1035   }
1036   __ bind(&done);
1037 }
1038 
1039 
DoDivByPowerOf2I(LDivByPowerOf2I * instr)1040 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1041   Register dividend = ToRegister(instr->dividend());
1042   int32_t divisor = instr->divisor();
1043   Register result = ToRegister(instr->result());
1044   DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1045   DCHECK(!result.is(dividend));
1046 
1047   // Check for (0 / -x) that will produce negative zero.
1048   HDiv* hdiv = instr->hydrogen();
1049   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1050     DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
1051                  Operand(zero_reg));
1052   }
1053   // Check for (kMinInt / -1).
1054   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1055     DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
1056                  Operand(kMinInt));
1057   }
1058   // Deoptimize if remainder will not be 0.
1059   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1060       divisor != 1 && divisor != -1) {
1061     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1062     __ And(at, dividend, Operand(mask));
1063     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
1064                  Operand(zero_reg));
1065   }
1066 
1067   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
1068     __ Dsubu(result, zero_reg, dividend);
1069     return;
1070   }
1071   uint16_t shift = WhichPowerOf2Abs(divisor);
1072   if (shift == 0) {
1073     __ Move(result, dividend);
1074   } else if (shift == 1) {
1075     __ dsrl32(result, dividend, 31);
1076     __ Daddu(result, dividend, Operand(result));
1077   } else {
1078     __ dsra32(result, dividend, 31);
1079     __ dsrl32(result, result, 32 - shift);
1080     __ Daddu(result, dividend, Operand(result));
1081   }
1082   if (shift > 0) __ dsra(result, result, shift);
1083   if (divisor < 0) __ Dsubu(result, zero_reg, result);
1084 }
1085 
1086 
DoDivByConstI(LDivByConstI * instr)1087 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1088   Register dividend = ToRegister(instr->dividend());
1089   int32_t divisor = instr->divisor();
1090   Register result = ToRegister(instr->result());
1091   DCHECK(!dividend.is(result));
1092 
1093   if (divisor == 0) {
1094     DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
1095     return;
1096   }
1097 
1098   // Check for (0 / -x) that will produce negative zero.
1099   HDiv* hdiv = instr->hydrogen();
1100   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1101     DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
1102                  Operand(zero_reg));
1103   }
1104 
1105   __ TruncatingDiv(result, dividend, Abs(divisor));
1106   if (divisor < 0) __ Subu(result, zero_reg, result);
1107 
1108   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1109     __ Dmul(scratch0(), result, Operand(divisor));
1110     __ Dsubu(scratch0(), scratch0(), dividend);
1111     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
1112                  Operand(zero_reg));
1113   }
1114 }
1115 
1116 
1117 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
DoDivI(LDivI * instr)1118 void LCodeGen::DoDivI(LDivI* instr) {
1119   HBinaryOperation* hdiv = instr->hydrogen();
1120   Register dividend = ToRegister(instr->dividend());
1121   Register divisor = ToRegister(instr->divisor());
1122   const Register result = ToRegister(instr->result());
1123 
1124   // On MIPS div is asynchronous - it will run in the background while we
1125   // check for special cases.
1126   __ Div(result, dividend, divisor);
1127 
1128   // Check for x / 0.
1129   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1130     DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
1131                  Operand(zero_reg));
1132   }
1133 
1134   // Check for (0 / -x) that will produce negative zero.
1135   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1136     Label left_not_zero;
1137     __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1138     DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
1139                  Operand(zero_reg));
1140     __ bind(&left_not_zero);
1141   }
1142 
1143   // Check for (kMinInt / -1).
1144   if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1145       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1146     Label left_not_min_int;
1147     __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1148     DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
1149     __ bind(&left_not_min_int);
1150   }
1151 
1152   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1153     // Calculate remainder.
1154     Register remainder = ToRegister(instr->temp());
1155     if (kArchVariant != kMips64r6) {
1156       __ mfhi(remainder);
1157     } else {
1158       __ dmod(remainder, dividend, divisor);
1159     }
1160     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
1161                  Operand(zero_reg));
1162   }
1163 }
1164 
1165 
DoMultiplyAddD(LMultiplyAddD * instr)1166 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1167   DoubleRegister addend = ToDoubleRegister(instr->addend());
1168   DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1169   DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1170 
1171   // This is computed in-place.
1172   DCHECK(addend.is(ToDoubleRegister(instr->result())));
1173 
1174   __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
1175 }
1176 
1177 
DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I * instr)1178 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1179   Register dividend = ToRegister(instr->dividend());
1180   Register result = ToRegister(instr->result());
1181   int32_t divisor = instr->divisor();
1182   Register scratch = result.is(dividend) ? scratch0() : dividend;
1183   DCHECK(!result.is(dividend) || !scratch.is(dividend));
1184 
1185   // If the divisor is 1, return the dividend.
1186   if (divisor == 0) {
1187     __ Move(result, dividend);
1188     return;
1189   }
1190 
1191   // If the divisor is positive, things are easy: There can be no deopts and we
1192   // can simply do an arithmetic right shift.
1193   uint16_t shift = WhichPowerOf2Abs(divisor);
1194   if (divisor > 1) {
1195     __ dsra(result, dividend, shift);
1196     return;
1197   }
1198 
1199   // If the divisor is negative, we have to negate and handle edge cases.
1200   // Dividend can be the same register as result so save the value of it
1201   // for checking overflow.
1202   __ Move(scratch, dividend);
1203 
1204   __ Dsubu(result, zero_reg, dividend);
1205   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1206     DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
1207                  Operand(zero_reg));
1208   }
1209 
1210   __ Xor(scratch, scratch, result);
1211   // Dividing by -1 is basically negation, unless we overflow.
1212   if (divisor == -1) {
1213     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1214       DeoptimizeIf(gt, instr, DeoptimizeReason::kOverflow, result,
1215                    Operand(kMaxInt));
1216     }
1217     return;
1218   }
1219 
1220   // If the negation could not overflow, simply shifting is OK.
1221   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1222     __ dsra(result, result, shift);
1223     return;
1224   }
1225 
1226   Label no_overflow, done;
1227   __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
1228   __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
1229   __ Branch(&done);
1230   __ bind(&no_overflow);
1231   __ dsra(result, result, shift);
1232   __ bind(&done);
1233 }
1234 
1235 
DoFlooringDivByConstI(LFlooringDivByConstI * instr)1236 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1237   Register dividend = ToRegister(instr->dividend());
1238   int32_t divisor = instr->divisor();
1239   Register result = ToRegister(instr->result());
1240   DCHECK(!dividend.is(result));
1241 
1242   if (divisor == 0) {
1243     DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
1244     return;
1245   }
1246 
1247   // Check for (0 / -x) that will produce negative zero.
1248   HMathFloorOfDiv* hdiv = instr->hydrogen();
1249   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1250     DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
1251                  Operand(zero_reg));
1252   }
1253 
1254   // Easy case: We need no dynamic check for the dividend and the flooring
1255   // division is the same as the truncating division.
1256   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1257       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1258     __ TruncatingDiv(result, dividend, Abs(divisor));
1259     if (divisor < 0) __ Dsubu(result, zero_reg, result);
1260     return;
1261   }
1262 
1263   // In the general case we may need to adjust before and after the truncating
1264   // division to get a flooring division.
1265   Register temp = ToRegister(instr->temp());
1266   DCHECK(!temp.is(dividend) && !temp.is(result));
1267   Label needs_adjustment, done;
1268   __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
1269             dividend, Operand(zero_reg));
1270   __ TruncatingDiv(result, dividend, Abs(divisor));
1271   if (divisor < 0) __ Dsubu(result, zero_reg, result);
1272   __ jmp(&done);
1273   __ bind(&needs_adjustment);
1274   __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1275   __ TruncatingDiv(result, temp, Abs(divisor));
1276   if (divisor < 0) __ Dsubu(result, zero_reg, result);
1277   __ Dsubu(result, result, Operand(1));
1278   __ bind(&done);
1279 }
1280 
1281 
1282 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
DoFlooringDivI(LFlooringDivI * instr)1283 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1284   HBinaryOperation* hdiv = instr->hydrogen();
1285   Register dividend = ToRegister(instr->dividend());
1286   Register divisor = ToRegister(instr->divisor());
1287   const Register result = ToRegister(instr->result());
1288 
1289   // On MIPS div is asynchronous - it will run in the background while we
1290   // check for special cases.
1291   __ Ddiv(result, dividend, divisor);
1292 
1293   // Check for x / 0.
1294   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1295     DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
1296                  Operand(zero_reg));
1297   }
1298 
1299   // Check for (0 / -x) that will produce negative zero.
1300   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1301     Label left_not_zero;
1302     __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1303     DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
1304                  Operand(zero_reg));
1305     __ bind(&left_not_zero);
1306   }
1307 
1308   // Check for (kMinInt / -1).
1309   if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1310       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1311     Label left_not_min_int;
1312     __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1313     DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
1314     __ bind(&left_not_min_int);
1315   }
1316 
1317   // We performed a truncating division. Correct the result if necessary.
1318   Label done;
1319   Register remainder = scratch0();
1320   if (kArchVariant != kMips64r6) {
1321     __ mfhi(remainder);
1322   } else {
1323     __ dmod(remainder, dividend, divisor);
1324   }
1325   __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1326   __ Xor(remainder, remainder, Operand(divisor));
1327   __ Branch(&done, ge, remainder, Operand(zero_reg));
1328   __ Dsubu(result, result, Operand(1));
1329   __ bind(&done);
1330 }
1331 
1332 
DoMulS(LMulS * instr)1333 void LCodeGen::DoMulS(LMulS* instr) {
1334   Register scratch = scratch0();
1335   Register result = ToRegister(instr->result());
1336   // Note that result may alias left.
1337   Register left = ToRegister(instr->left());
1338   LOperand* right_op = instr->right();
1339 
1340   bool bailout_on_minus_zero =
1341     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1342   bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1343 
1344   if (right_op->IsConstantOperand()) {
1345     int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1346 
1347     if (bailout_on_minus_zero && (constant < 0)) {
1348       // The case of a null constant will be handled separately.
1349       // If constant is negative and left is null, the result should be -0.
1350       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
1351                    Operand(zero_reg));
1352     }
1353 
1354     switch (constant) {
1355       case -1:
1356         if (overflow) {
1357           Label no_overflow;
1358           __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
1359           DeoptimizeIf(al, instr);
1360           __ bind(&no_overflow);
1361         } else {
1362           __ Dsubu(result, zero_reg, left);
1363         }
1364         break;
1365       case 0:
1366         if (bailout_on_minus_zero) {
1367           // If left is strictly negative and the constant is null, the
1368           // result is -0. Deoptimize if required, otherwise return 0.
1369           DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
1370                        Operand(zero_reg));
1371         }
1372         __ mov(result, zero_reg);
1373         break;
1374       case 1:
1375         // Nothing to do.
1376         __ Move(result, left);
1377         break;
1378       default:
1379         // Multiplying by powers of two and powers of two plus or minus
1380         // one can be done faster with shifted operands.
1381         // For other constants we emit standard code.
1382         int32_t mask = constant >> 31;
1383         uint32_t constant_abs = (constant + mask) ^ mask;
1384 
1385         if (base::bits::IsPowerOfTwo32(constant_abs)) {
1386           int32_t shift = WhichPowerOf2(constant_abs);
1387           __ dsll(result, left, shift);
1388           // Correct the sign of the result if the constant is negative.
1389           if (constant < 0) __ Dsubu(result, zero_reg, result);
1390         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1391           int32_t shift = WhichPowerOf2(constant_abs - 1);
1392           __ Dlsa(result, left, left, shift);
1393           // Correct the sign of the result if the constant is negative.
1394           if (constant < 0) __ Dsubu(result, zero_reg, result);
1395         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1396           int32_t shift = WhichPowerOf2(constant_abs + 1);
1397           __ dsll(scratch, left, shift);
1398           __ Dsubu(result, scratch, left);
1399           // Correct the sign of the result if the constant is negative.
1400           if (constant < 0) __ Dsubu(result, zero_reg, result);
1401         } else {
1402           // Generate standard code.
1403           __ li(at, constant);
1404           __ Dmul(result, left, at);
1405         }
1406     }
1407   } else {
1408     DCHECK(right_op->IsRegister());
1409     Register right = ToRegister(right_op);
1410 
1411     if (overflow) {
1412       // hi:lo = left * right.
1413       __ Dmulh(result, left, right);
1414       __ dsra32(scratch, result, 0);
1415       __ sra(at, result, 31);
1416       __ SmiTag(result);
1417       DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
1418                    Operand(at));
1419     } else {
1420       __ SmiUntag(result, left);
1421       __ dmul(result, result, right);
1422     }
1423 
1424     if (bailout_on_minus_zero) {
1425       Label done;
1426       __ Xor(at, left, right);
1427       __ Branch(&done, ge, at, Operand(zero_reg));
1428       // Bail out if the result is minus zero.
1429       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
1430                    Operand(zero_reg));
1431       __ bind(&done);
1432     }
1433   }
1434 }
1435 
1436 
DoMulI(LMulI * instr)1437 void LCodeGen::DoMulI(LMulI* instr) {
1438   Register scratch = scratch0();
1439   Register result = ToRegister(instr->result());
1440   // Note that result may alias left.
1441   Register left = ToRegister(instr->left());
1442   LOperand* right_op = instr->right();
1443 
1444   bool bailout_on_minus_zero =
1445       instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1446   bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1447 
1448   if (right_op->IsConstantOperand()) {
1449     int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1450 
1451     if (bailout_on_minus_zero && (constant < 0)) {
1452       // The case of a null constant will be handled separately.
1453       // If constant is negative and left is null, the result should be -0.
1454       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
1455                    Operand(zero_reg));
1456     }
1457 
1458     switch (constant) {
1459       case -1:
1460         if (overflow) {
1461           Label no_overflow;
1462           __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
1463           DeoptimizeIf(al, instr);
1464           __ bind(&no_overflow);
1465         } else {
1466           __ Subu(result, zero_reg, left);
1467         }
1468         break;
1469       case 0:
1470         if (bailout_on_minus_zero) {
1471           // If left is strictly negative and the constant is null, the
1472           // result is -0. Deoptimize if required, otherwise return 0.
1473           DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
1474                        Operand(zero_reg));
1475         }
1476         __ mov(result, zero_reg);
1477         break;
1478       case 1:
1479         // Nothing to do.
1480         __ Move(result, left);
1481         break;
1482       default:
1483         // Multiplying by powers of two and powers of two plus or minus
1484         // one can be done faster with shifted operands.
1485         // For other constants we emit standard code.
1486         int32_t mask = constant >> 31;
1487         uint32_t constant_abs = (constant + mask) ^ mask;
1488 
1489         if (base::bits::IsPowerOfTwo32(constant_abs)) {
1490           int32_t shift = WhichPowerOf2(constant_abs);
1491           __ sll(result, left, shift);
1492           // Correct the sign of the result if the constant is negative.
1493           if (constant < 0) __ Subu(result, zero_reg, result);
1494         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1495           int32_t shift = WhichPowerOf2(constant_abs - 1);
1496           __ Lsa(result, left, left, shift);
1497           // Correct the sign of the result if the constant is negative.
1498           if (constant < 0) __ Subu(result, zero_reg, result);
1499         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1500           int32_t shift = WhichPowerOf2(constant_abs + 1);
1501           __ sll(scratch, left, shift);
1502           __ Subu(result, scratch, left);
1503           // Correct the sign of the result if the constant is negative.
1504           if (constant < 0) __ Subu(result, zero_reg, result);
1505         } else {
1506           // Generate standard code.
1507           __ li(at, constant);
1508           __ Mul(result, left, at);
1509         }
1510     }
1511 
1512   } else {
1513     DCHECK(right_op->IsRegister());
1514     Register right = ToRegister(right_op);
1515 
1516     if (overflow) {
1517       // hi:lo = left * right.
1518       __ Dmul(result, left, right);
1519       __ dsra32(scratch, result, 0);
1520       __ sra(at, result, 31);
1521 
1522       DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
1523                    Operand(at));
1524     } else {
1525       __ mul(result, left, right);
1526     }
1527 
1528     if (bailout_on_minus_zero) {
1529       Label done;
1530       __ Xor(at, left, right);
1531       __ Branch(&done, ge, at, Operand(zero_reg));
1532       // Bail out if the result is minus zero.
1533       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
1534                    Operand(zero_reg));
1535       __ bind(&done);
1536     }
1537   }
1538 }
1539 
1540 
DoBitI(LBitI * instr)1541 void LCodeGen::DoBitI(LBitI* instr) {
1542   LOperand* left_op = instr->left();
1543   LOperand* right_op = instr->right();
1544   DCHECK(left_op->IsRegister());
1545   Register left = ToRegister(left_op);
1546   Register result = ToRegister(instr->result());
1547   Operand right(no_reg);
1548 
1549   if (right_op->IsStackSlot()) {
1550     right = Operand(EmitLoadRegister(right_op, at));
1551   } else {
1552     DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1553     right = ToOperand(right_op);
1554   }
1555 
1556   switch (instr->op()) {
1557     case Token::BIT_AND:
1558       __ And(result, left, right);
1559       break;
1560     case Token::BIT_OR:
1561       __ Or(result, left, right);
1562       break;
1563     case Token::BIT_XOR:
1564       if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1565         __ Nor(result, zero_reg, left);
1566       } else {
1567         __ Xor(result, left, right);
1568       }
1569       break;
1570     default:
1571       UNREACHABLE();
1572       break;
1573   }
1574 }
1575 
1576 
DoShiftI(LShiftI * instr)1577 void LCodeGen::DoShiftI(LShiftI* instr) {
1578   // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1579   // result may alias either of them.
1580   LOperand* right_op = instr->right();
1581   Register left = ToRegister(instr->left());
1582   Register result = ToRegister(instr->result());
1583 
1584   if (right_op->IsRegister()) {
1585     // No need to mask the right operand on MIPS, it is built into the variable
1586     // shift instructions.
1587     switch (instr->op()) {
1588       case Token::ROR:
1589         __ Ror(result, left, Operand(ToRegister(right_op)));
1590         break;
1591       case Token::SAR:
1592         __ srav(result, left, ToRegister(right_op));
1593         break;
1594       case Token::SHR:
1595         __ srlv(result, left, ToRegister(right_op));
1596         if (instr->can_deopt()) {
1597            // TODO(yy): (-1) >>> 0. anything else?
1598            DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
1599                         Operand(zero_reg));
1600            DeoptimizeIf(gt, instr, DeoptimizeReason::kNegativeValue, result,
1601                         Operand(kMaxInt));
1602         }
1603         break;
1604       case Token::SHL:
1605         __ sllv(result, left, ToRegister(right_op));
1606         break;
1607       default:
1608         UNREACHABLE();
1609         break;
1610     }
1611   } else {
1612     // Mask the right_op operand.
1613     int value = ToInteger32(LConstantOperand::cast(right_op));
1614     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1615     switch (instr->op()) {
1616       case Token::ROR:
1617         if (shift_count != 0) {
1618           __ Ror(result, left, Operand(shift_count));
1619         } else {
1620           __ Move(result, left);
1621         }
1622         break;
1623       case Token::SAR:
1624         if (shift_count != 0) {
1625           __ sra(result, left, shift_count);
1626         } else {
1627           __ Move(result, left);
1628         }
1629         break;
1630       case Token::SHR:
1631         if (shift_count != 0) {
1632           __ srl(result, left, shift_count);
1633         } else {
1634           if (instr->can_deopt()) {
1635             __ And(at, left, Operand(0x80000000));
1636             DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
1637                          Operand(zero_reg));
1638           }
1639           __ Move(result, left);
1640         }
1641         break;
1642       case Token::SHL:
1643         if (shift_count != 0) {
1644           if (instr->hydrogen_value()->representation().IsSmi()) {
1645             __ dsll(result, left, shift_count);
1646           } else {
1647             __ sll(result, left, shift_count);
1648           }
1649         } else {
1650           __ Move(result, left);
1651         }
1652         break;
1653       default:
1654         UNREACHABLE();
1655         break;
1656     }
1657   }
1658 }
1659 
1660 
DoSubS(LSubS * instr)1661 void LCodeGen::DoSubS(LSubS* instr) {
1662   LOperand* left = instr->left();
1663   LOperand* right = instr->right();
1664   LOperand* result = instr->result();
1665   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1666 
1667   if (!can_overflow) {
1668     DCHECK(right->IsRegister() || right->IsConstantOperand());
1669     __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
1670   } else {  // can_overflow.
1671     Register scratch = scratch0();
1672     Label no_overflow_label;
1673     DCHECK(right->IsRegister() || right->IsConstantOperand());
1674     __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1675                        &no_overflow_label, scratch);
1676     DeoptimizeIf(al, instr);
1677     __ bind(&no_overflow_label);
1678   }
1679 }
1680 
1681 
DoSubI(LSubI * instr)1682 void LCodeGen::DoSubI(LSubI* instr) {
1683   LOperand* left = instr->left();
1684   LOperand* right = instr->right();
1685   LOperand* result = instr->result();
1686   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1687 
1688   if (!can_overflow) {
1689     DCHECK(right->IsRegister() || right->IsConstantOperand());
1690     __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1691   } else {  // can_overflow.
1692     Register scratch = scratch0();
1693     Label no_overflow_label;
1694     DCHECK(right->IsRegister() || right->IsConstantOperand());
1695     __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1696                       &no_overflow_label, scratch);
1697     DeoptimizeIf(al, instr);
1698     __ bind(&no_overflow_label);
1699   }
1700 }
1701 
1702 
DoConstantI(LConstantI * instr)1703 void LCodeGen::DoConstantI(LConstantI* instr) {
1704   __ li(ToRegister(instr->result()), Operand(instr->value()));
1705 }
1706 
1707 
DoConstantS(LConstantS * instr)1708 void LCodeGen::DoConstantS(LConstantS* instr) {
1709   __ li(ToRegister(instr->result()), Operand(instr->value()));
1710 }
1711 
1712 
DoConstantD(LConstantD * instr)1713 void LCodeGen::DoConstantD(LConstantD* instr) {
1714   DCHECK(instr->result()->IsDoubleRegister());
1715   DoubleRegister result = ToDoubleRegister(instr->result());
1716   double v = instr->value();
1717   __ Move(result, v);
1718 }
1719 
1720 
DoConstantE(LConstantE * instr)1721 void LCodeGen::DoConstantE(LConstantE* instr) {
1722   __ li(ToRegister(instr->result()), Operand(instr->value()));
1723 }
1724 
1725 
DoConstantT(LConstantT * instr)1726 void LCodeGen::DoConstantT(LConstantT* instr) {
1727   Handle<Object> object = instr->value(isolate());
1728   AllowDeferredHandleDereference smi_check;
1729   __ li(ToRegister(instr->result()), object);
1730 }
1731 
1732 
BuildSeqStringOperand(Register string,LOperand * index,String::Encoding encoding)1733 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1734                                            LOperand* index,
1735                                            String::Encoding encoding) {
1736   if (index->IsConstantOperand()) {
1737     int offset = ToInteger32(LConstantOperand::cast(index));
1738     if (encoding == String::TWO_BYTE_ENCODING) {
1739       offset *= kUC16Size;
1740     }
1741     STATIC_ASSERT(kCharSize == 1);
1742     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1743   }
1744   Register scratch = scratch0();
1745   DCHECK(!scratch.is(string));
1746   DCHECK(!scratch.is(ToRegister(index)));
1747   if (encoding == String::ONE_BYTE_ENCODING) {
1748     __ Daddu(scratch, string, ToRegister(index));
1749   } else {
1750     STATIC_ASSERT(kUC16Size == 2);
1751     __ dsll(scratch, ToRegister(index), 1);
1752     __ Daddu(scratch, string, scratch);
1753   }
1754   return FieldMemOperand(scratch, SeqString::kHeaderSize);
1755 }
1756 
1757 
DoSeqStringGetChar(LSeqStringGetChar * instr)1758 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1759   String::Encoding encoding = instr->hydrogen()->encoding();
1760   Register string = ToRegister(instr->string());
1761   Register result = ToRegister(instr->result());
1762 
1763   if (FLAG_debug_code) {
1764     Register scratch = scratch0();
1765     __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1766     __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1767 
1768     __ And(scratch, scratch,
1769            Operand(kStringRepresentationMask | kStringEncodingMask));
1770     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1771     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1772     __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1773                                 ? one_byte_seq_type : two_byte_seq_type));
1774     __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1775   }
1776 
1777   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1778   if (encoding == String::ONE_BYTE_ENCODING) {
1779     __ lbu(result, operand);
1780   } else {
1781     __ lhu(result, operand);
1782   }
1783 }
1784 
1785 
DoSeqStringSetChar(LSeqStringSetChar * instr)1786 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1787   String::Encoding encoding = instr->hydrogen()->encoding();
1788   Register string = ToRegister(instr->string());
1789   Register value = ToRegister(instr->value());
1790 
1791   if (FLAG_debug_code) {
1792     Register scratch = scratch0();
1793     Register index = ToRegister(instr->index());
1794     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1795     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1796     int encoding_mask =
1797         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1798         ? one_byte_seq_type : two_byte_seq_type;
1799     __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1800   }
1801 
1802   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1803   if (encoding == String::ONE_BYTE_ENCODING) {
1804     __ sb(value, operand);
1805   } else {
1806     __ sh(value, operand);
1807   }
1808 }
1809 
1810 
DoAddE(LAddE * instr)1811 void LCodeGen::DoAddE(LAddE* instr) {
1812   LOperand* result = instr->result();
1813   LOperand* left = instr->left();
1814   LOperand* right = instr->right();
1815 
1816   DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
1817   DCHECK(right->IsRegister() || right->IsConstantOperand());
1818   __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
1819 }
1820 
1821 
DoAddS(LAddS * instr)1822 void LCodeGen::DoAddS(LAddS* instr) {
1823   LOperand* left = instr->left();
1824   LOperand* right = instr->right();
1825   LOperand* result = instr->result();
1826   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1827 
1828   if (!can_overflow) {
1829     DCHECK(right->IsRegister() || right->IsConstantOperand());
1830     __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
1831   } else {  // can_overflow.
1832     Label no_overflow_label;
1833     Register scratch = scratch1();
1834     DCHECK(right->IsRegister() || right->IsConstantOperand());
1835     __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1836                        &no_overflow_label, scratch);
1837     DeoptimizeIf(al, instr);
1838     __ bind(&no_overflow_label);
1839   }
1840 }
1841 
1842 
DoAddI(LAddI * instr)1843 void LCodeGen::DoAddI(LAddI* instr) {
1844   LOperand* left = instr->left();
1845   LOperand* right = instr->right();
1846   LOperand* result = instr->result();
1847   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1848 
1849   if (!can_overflow) {
1850     DCHECK(right->IsRegister() || right->IsConstantOperand());
1851     __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1852   } else {  // can_overflow.
1853     Label no_overflow_label;
1854     Register scratch = scratch1();
1855     DCHECK(right->IsRegister() || right->IsConstantOperand());
1856     __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1857                       &no_overflow_label, scratch);
1858     DeoptimizeIf(al, instr);
1859     __ bind(&no_overflow_label);
1860   }
1861 }
1862 
1863 
DoMathMinMax(LMathMinMax * instr)1864 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1865   LOperand* left = instr->left();
1866   LOperand* right = instr->right();
1867   HMathMinMax::Operation operation = instr->hydrogen()->operation();
1868   Register scratch = scratch1();
1869   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1870     Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1871     Register left_reg = ToRegister(left);
1872     Register right_reg = EmitLoadRegister(right, scratch0());
1873     Register result_reg = ToRegister(instr->result());
1874     Label return_right, done;
1875     __ Slt(scratch, left_reg, Operand(right_reg));
1876     if (condition == ge) {
1877      __  Movz(result_reg, left_reg, scratch);
1878      __  Movn(result_reg, right_reg, scratch);
1879     } else {
1880      DCHECK(condition == le);
1881      __  Movn(result_reg, left_reg, scratch);
1882      __  Movz(result_reg, right_reg, scratch);
1883     }
1884   } else {
1885     DCHECK(instr->hydrogen()->representation().IsDouble());
1886     FPURegister left_reg = ToDoubleRegister(left);
1887     FPURegister right_reg = ToDoubleRegister(right);
1888     FPURegister result_reg = ToDoubleRegister(instr->result());
1889     Label nan, done;
1890     if (operation == HMathMinMax::kMathMax) {
1891       __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
1892     } else {
1893       DCHECK(operation == HMathMinMax::kMathMin);
1894       __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
1895     }
1896     __ Branch(&done);
1897 
1898     __ bind(&nan);
1899     __ LoadRoot(scratch, Heap::kNanValueRootIndex);
1900     __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
1901 
1902     __ bind(&done);
1903   }
1904 }
1905 
1906 
DoArithmeticD(LArithmeticD * instr)1907 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1908   DoubleRegister left = ToDoubleRegister(instr->left());
1909   DoubleRegister right = ToDoubleRegister(instr->right());
1910   DoubleRegister result = ToDoubleRegister(instr->result());
1911   switch (instr->op()) {
1912     case Token::ADD:
1913       __ add_d(result, left, right);
1914       break;
1915     case Token::SUB:
1916       __ sub_d(result, left, right);
1917       break;
1918     case Token::MUL:
1919       __ mul_d(result, left, right);
1920       break;
1921     case Token::DIV:
1922       __ div_d(result, left, right);
1923       break;
1924     case Token::MOD: {
1925       // Save a0-a3 on the stack.
1926       RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1927       __ MultiPush(saved_regs);
1928 
1929       __ PrepareCallCFunction(0, 2, scratch0());
1930       __ MovToFloatParameters(left, right);
1931       __ CallCFunction(
1932           ExternalReference::mod_two_doubles_operation(isolate()),
1933           0, 2);
1934       // Move the result in the double result register.
1935       __ MovFromFloatResult(result);
1936 
1937       // Restore saved register.
1938       __ MultiPop(saved_regs);
1939       break;
1940     }
1941     default:
1942       UNREACHABLE();
1943       break;
1944   }
1945 }
1946 
1947 
DoArithmeticT(LArithmeticT * instr)1948 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1949   DCHECK(ToRegister(instr->context()).is(cp));
1950   DCHECK(ToRegister(instr->left()).is(a1));
1951   DCHECK(ToRegister(instr->right()).is(a0));
1952   DCHECK(ToRegister(instr->result()).is(v0));
1953 
1954   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
1955   CallCode(code, RelocInfo::CODE_TARGET, instr);
1956   // Other arch use a nop here, to signal that there is no inlined
1957   // patchable code. Mips does not need the nop, since our marker
1958   // instruction (andi zero_reg) will never be used in normal code.
1959 }
1960 
1961 
1962 template<class InstrType>
EmitBranch(InstrType instr,Condition condition,Register src1,const Operand & src2)1963 void LCodeGen::EmitBranch(InstrType instr,
1964                           Condition condition,
1965                           Register src1,
1966                           const Operand& src2) {
1967   int left_block = instr->TrueDestination(chunk_);
1968   int right_block = instr->FalseDestination(chunk_);
1969 
1970   int next_block = GetNextEmittedBlock();
1971   if (right_block == left_block || condition == al) {
1972     EmitGoto(left_block);
1973   } else if (left_block == next_block) {
1974     __ Branch(chunk_->GetAssemblyLabel(right_block),
1975               NegateCondition(condition), src1, src2);
1976   } else if (right_block == next_block) {
1977     __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1978   } else {
1979     __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1980     __ Branch(chunk_->GetAssemblyLabel(right_block));
1981   }
1982 }
1983 
1984 
1985 template<class InstrType>
EmitBranchF(InstrType instr,Condition condition,FPURegister src1,FPURegister src2)1986 void LCodeGen::EmitBranchF(InstrType instr,
1987                            Condition condition,
1988                            FPURegister src1,
1989                            FPURegister src2) {
1990   int right_block = instr->FalseDestination(chunk_);
1991   int left_block = instr->TrueDestination(chunk_);
1992 
1993   int next_block = GetNextEmittedBlock();
1994   if (right_block == left_block) {
1995     EmitGoto(left_block);
1996   } else if (left_block == next_block) {
1997     __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1998                NegateFpuCondition(condition), src1, src2);
1999   } else if (right_block == next_block) {
2000     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2001                condition, src1, src2);
2002   } else {
2003     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2004                condition, src1, src2);
2005     __ Branch(chunk_->GetAssemblyLabel(right_block));
2006   }
2007 }
2008 
2009 
2010 template <class InstrType>
EmitTrueBranch(InstrType instr,Condition condition,Register src1,const Operand & src2)2011 void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
2012                               Register src1, const Operand& src2) {
2013   int true_block = instr->TrueDestination(chunk_);
2014   __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
2015 }
2016 
2017 
2018 template <class InstrType>
EmitFalseBranch(InstrType instr,Condition condition,Register src1,const Operand & src2)2019 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
2020                                Register src1, const Operand& src2) {
2021   int false_block = instr->FalseDestination(chunk_);
2022   __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
2023 }
2024 
2025 
2026 template<class InstrType>
EmitFalseBranchF(InstrType instr,Condition condition,FPURegister src1,FPURegister src2)2027 void LCodeGen::EmitFalseBranchF(InstrType instr,
2028                                 Condition condition,
2029                                 FPURegister src1,
2030                                 FPURegister src2) {
2031   int false_block = instr->FalseDestination(chunk_);
2032   __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
2033              condition, src1, src2);
2034 }
2035 
2036 
DoDebugBreak(LDebugBreak * instr)2037 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2038   __ stop("LDebugBreak");
2039 }
2040 
2041 
DoBranch(LBranch * instr)2042 void LCodeGen::DoBranch(LBranch* instr) {
2043   Representation r = instr->hydrogen()->value()->representation();
2044   if (r.IsInteger32() || r.IsSmi()) {
2045     DCHECK(!info()->IsStub());
2046     Register reg = ToRegister(instr->value());
2047     EmitBranch(instr, ne, reg, Operand(zero_reg));
2048   } else if (r.IsDouble()) {
2049     DCHECK(!info()->IsStub());
2050     DoubleRegister reg = ToDoubleRegister(instr->value());
2051     // Test the double value. Zero and NaN are false.
2052     EmitBranchF(instr, ogl, reg, kDoubleRegZero);
2053   } else {
2054     DCHECK(r.IsTagged());
2055     Register reg = ToRegister(instr->value());
2056     HType type = instr->hydrogen()->value()->type();
2057     if (type.IsBoolean()) {
2058       DCHECK(!info()->IsStub());
2059       __ LoadRoot(at, Heap::kTrueValueRootIndex);
2060       EmitBranch(instr, eq, reg, Operand(at));
2061     } else if (type.IsSmi()) {
2062       DCHECK(!info()->IsStub());
2063       EmitBranch(instr, ne, reg, Operand(zero_reg));
2064     } else if (type.IsJSArray()) {
2065       DCHECK(!info()->IsStub());
2066       EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2067     } else if (type.IsHeapNumber()) {
2068       DCHECK(!info()->IsStub());
2069       DoubleRegister dbl_scratch = double_scratch0();
2070       __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2071       // Test the double value. Zero and NaN are false.
2072       EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
2073     } else if (type.IsString()) {
2074       DCHECK(!info()->IsStub());
2075       __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2076       EmitBranch(instr, ne, at, Operand(zero_reg));
2077     } else {
2078       ToBooleanHints expected = instr->hydrogen()->expected_input_types();
2079       // Avoid deopts in the case where we've never executed this path before.
2080       if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
2081 
2082       if (expected & ToBooleanHint::kUndefined) {
2083         // undefined -> false.
2084         __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2085         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2086       }
2087       if (expected & ToBooleanHint::kBoolean) {
2088         // Boolean -> its value.
2089         __ LoadRoot(at, Heap::kTrueValueRootIndex);
2090         __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
2091         __ LoadRoot(at, Heap::kFalseValueRootIndex);
2092         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2093       }
2094       if (expected & ToBooleanHint::kNull) {
2095         // 'null' -> false.
2096         __ LoadRoot(at, Heap::kNullValueRootIndex);
2097         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2098       }
2099 
2100       if (expected & ToBooleanHint::kSmallInteger) {
2101         // Smis: 0 -> false, all other -> true.
2102         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2103         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2104       } else if (expected & ToBooleanHint::kNeedsMap) {
2105         // If we need a map later and have a Smi -> deopt.
2106         __ SmiTst(reg, at);
2107         DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
2108       }
2109 
2110       const Register map = scratch0();
2111       if (expected & ToBooleanHint::kNeedsMap) {
2112         __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2113         if (expected & ToBooleanHint::kCanBeUndetectable) {
2114           // Undetectable -> false.
2115           __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2116           __ And(at, at, Operand(1 << Map::kIsUndetectable));
2117           __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2118         }
2119       }
2120 
2121       if (expected & ToBooleanHint::kReceiver) {
2122         // spec object -> true.
2123         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2124         __ Branch(instr->TrueLabel(chunk_),
2125                   ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
2126       }
2127 
2128       if (expected & ToBooleanHint::kString) {
2129         // String value -> false iff empty.
2130         Label not_string;
2131         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2132         __ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
2133         __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2134         __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2135         __ Branch(instr->FalseLabel(chunk_));
2136         __ bind(&not_string);
2137       }
2138 
2139       if (expected & ToBooleanHint::kSymbol) {
2140         // Symbol value -> true.
2141         const Register scratch = scratch1();
2142         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2143         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2144       }
2145 
2146       if (expected & ToBooleanHint::kSimdValue) {
2147         // SIMD value -> true.
2148         const Register scratch = scratch1();
2149         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2150         __ Branch(instr->TrueLabel(chunk_), eq, scratch,
2151                   Operand(SIMD128_VALUE_TYPE));
2152       }
2153 
2154       if (expected & ToBooleanHint::kHeapNumber) {
2155         // heap number -> false iff +0, -0, or NaN.
2156         DoubleRegister dbl_scratch = double_scratch0();
2157         Label not_heap_number;
2158         __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2159         __ Branch(&not_heap_number, ne, map, Operand(at));
2160         __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2161         __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2162                    ne, dbl_scratch, kDoubleRegZero);
2163         // Falls through if dbl_scratch == 0.
2164         __ Branch(instr->FalseLabel(chunk_));
2165         __ bind(&not_heap_number);
2166       }
2167 
2168       if (expected != ToBooleanHint::kAny) {
2169         // We've seen something for the first time -> deopt.
2170         // This can only happen if we are not generic already.
2171         DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
2172                      Operand(zero_reg));
2173       }
2174     }
2175   }
2176 }
2177 
2178 
EmitGoto(int block)2179 void LCodeGen::EmitGoto(int block) {
2180   if (!IsNextEmittedBlock(block)) {
2181     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2182   }
2183 }
2184 
2185 
DoGoto(LGoto * instr)2186 void LCodeGen::DoGoto(LGoto* instr) {
2187   EmitGoto(instr->block_id());
2188 }
2189 
2190 
TokenToCondition(Token::Value op,bool is_unsigned)2191 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2192   Condition cond = kNoCondition;
2193   switch (op) {
2194     case Token::EQ:
2195     case Token::EQ_STRICT:
2196       cond = eq;
2197       break;
2198     case Token::NE:
2199     case Token::NE_STRICT:
2200       cond = ne;
2201       break;
2202     case Token::LT:
2203       cond = is_unsigned ? lo : lt;
2204       break;
2205     case Token::GT:
2206       cond = is_unsigned ? hi : gt;
2207       break;
2208     case Token::LTE:
2209       cond = is_unsigned ? ls : le;
2210       break;
2211     case Token::GTE:
2212       cond = is_unsigned ? hs : ge;
2213       break;
2214     case Token::IN:
2215     case Token::INSTANCEOF:
2216     default:
2217       UNREACHABLE();
2218   }
2219   return cond;
2220 }
2221 
2222 
DoCompareNumericAndBranch(LCompareNumericAndBranch * instr)2223 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2224   LOperand* left = instr->left();
2225   LOperand* right = instr->right();
2226   bool is_unsigned =
2227       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2228       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2229   Condition cond = TokenToCondition(instr->op(), is_unsigned);
2230 
2231   if (left->IsConstantOperand() && right->IsConstantOperand()) {
2232     // We can statically evaluate the comparison.
2233     double left_val = ToDouble(LConstantOperand::cast(left));
2234     double right_val = ToDouble(LConstantOperand::cast(right));
2235     int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2236                          ? instr->TrueDestination(chunk_)
2237                          : instr->FalseDestination(chunk_);
2238     EmitGoto(next_block);
2239   } else {
2240     if (instr->is_double()) {
2241       // Compare left and right as doubles and load the
2242       // resulting flags into the normal status register.
2243       FPURegister left_reg = ToDoubleRegister(left);
2244       FPURegister right_reg = ToDoubleRegister(right);
2245 
2246       // If a NaN is involved, i.e. the result is unordered,
2247       // jump to false block label.
2248       __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2249                  left_reg, right_reg);
2250 
2251       EmitBranchF(instr, cond, left_reg, right_reg);
2252     } else {
2253       Register cmp_left;
2254       Operand cmp_right = Operand((int64_t)0);
2255       if (right->IsConstantOperand()) {
2256         int32_t value = ToInteger32(LConstantOperand::cast(right));
2257         if (instr->hydrogen_value()->representation().IsSmi()) {
2258           cmp_left = ToRegister(left);
2259           cmp_right = Operand(Smi::FromInt(value));
2260         } else {
2261           cmp_left = ToRegister(left);
2262           cmp_right = Operand(value);
2263         }
2264       } else if (left->IsConstantOperand()) {
2265         int32_t value = ToInteger32(LConstantOperand::cast(left));
2266         if (instr->hydrogen_value()->representation().IsSmi()) {
2267           cmp_left = ToRegister(right);
2268           cmp_right = Operand(Smi::FromInt(value));
2269         } else {
2270           cmp_left = ToRegister(right);
2271           cmp_right = Operand(value);
2272         }
2273         // We commuted the operands, so commute the condition.
2274         cond = CommuteCondition(cond);
2275       } else {
2276         cmp_left = ToRegister(left);
2277         cmp_right = Operand(ToRegister(right));
2278       }
2279 
2280       EmitBranch(instr, cond, cmp_left, cmp_right);
2281     }
2282   }
2283 }
2284 
2285 
DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch * instr)2286 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2287   Register left = ToRegister(instr->left());
2288   Register right = ToRegister(instr->right());
2289 
2290   EmitBranch(instr, eq, left, Operand(right));
2291 }
2292 
2293 
DoCmpHoleAndBranch(LCmpHoleAndBranch * instr)2294 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2295   if (instr->hydrogen()->representation().IsTagged()) {
2296     Register input_reg = ToRegister(instr->object());
2297     __ li(at, Operand(factory()->the_hole_value()));
2298     EmitBranch(instr, eq, input_reg, Operand(at));
2299     return;
2300   }
2301 
2302   DoubleRegister input_reg = ToDoubleRegister(instr->object());
2303   EmitFalseBranchF(instr, eq, input_reg, input_reg);
2304 
2305   Register scratch = scratch0();
2306   __ FmoveHigh(scratch, input_reg);
2307   EmitBranch(instr, eq, scratch,
2308              Operand(static_cast<int32_t>(kHoleNanUpper32)));
2309 }
2310 
2311 
EmitIsString(Register input,Register temp1,Label * is_not_string,SmiCheck check_needed=INLINE_SMI_CHECK)2312 Condition LCodeGen::EmitIsString(Register input,
2313                                  Register temp1,
2314                                  Label* is_not_string,
2315                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
2316   if (check_needed == INLINE_SMI_CHECK) {
2317     __ JumpIfSmi(input, is_not_string);
2318   }
2319   __ GetObjectType(input, temp1, temp1);
2320 
2321   return lt;
2322 }
2323 
2324 
DoIsStringAndBranch(LIsStringAndBranch * instr)2325 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2326   Register reg = ToRegister(instr->value());
2327   Register temp1 = ToRegister(instr->temp());
2328 
2329   SmiCheck check_needed =
2330       instr->hydrogen()->value()->type().IsHeapObject()
2331           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2332   Condition true_cond =
2333       EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2334 
2335   EmitBranch(instr, true_cond, temp1,
2336              Operand(FIRST_NONSTRING_TYPE));
2337 }
2338 
2339 
DoIsSmiAndBranch(LIsSmiAndBranch * instr)2340 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2341   Register input_reg = EmitLoadRegister(instr->value(), at);
2342   __ And(at, input_reg, kSmiTagMask);
2343   EmitBranch(instr, eq, at, Operand(zero_reg));
2344 }
2345 
2346 
DoIsUndetectableAndBranch(LIsUndetectableAndBranch * instr)2347 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2348   Register input = ToRegister(instr->value());
2349   Register temp = ToRegister(instr->temp());
2350 
2351   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2352     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2353   }
2354   __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2355   __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2356   __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2357   EmitBranch(instr, ne, at, Operand(zero_reg));
2358 }
2359 
2360 
ComputeCompareCondition(Token::Value op)2361 static Condition ComputeCompareCondition(Token::Value op) {
2362   switch (op) {
2363     case Token::EQ_STRICT:
2364     case Token::EQ:
2365       return eq;
2366     case Token::LT:
2367       return lt;
2368     case Token::GT:
2369       return gt;
2370     case Token::LTE:
2371       return le;
2372     case Token::GTE:
2373       return ge;
2374     default:
2375       UNREACHABLE();
2376       return kNoCondition;
2377   }
2378 }
2379 
2380 
DoStringCompareAndBranch(LStringCompareAndBranch * instr)2381 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2382   DCHECK(ToRegister(instr->context()).is(cp));
2383   DCHECK(ToRegister(instr->left()).is(a1));
2384   DCHECK(ToRegister(instr->right()).is(a0));
2385 
2386   Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2387   CallCode(code, RelocInfo::CODE_TARGET, instr);
2388   __ LoadRoot(at, Heap::kTrueValueRootIndex);
2389   EmitBranch(instr, eq, v0, Operand(at));
2390 }
2391 
2392 
TestType(HHasInstanceTypeAndBranch * instr)2393 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2394   InstanceType from = instr->from();
2395   InstanceType to = instr->to();
2396   if (from == FIRST_TYPE) return to;
2397   DCHECK(from == to || to == LAST_TYPE);
2398   return from;
2399 }
2400 
2401 
BranchCondition(HHasInstanceTypeAndBranch * instr)2402 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2403   InstanceType from = instr->from();
2404   InstanceType to = instr->to();
2405   if (from == to) return eq;
2406   if (to == LAST_TYPE) return hs;
2407   if (from == FIRST_TYPE) return ls;
2408   UNREACHABLE();
2409   return eq;
2410 }
2411 
2412 
DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch * instr)2413 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2414   Register scratch = scratch0();
2415   Register input = ToRegister(instr->value());
2416 
2417   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2418     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2419   }
2420 
2421   __ GetObjectType(input, scratch, scratch);
2422   EmitBranch(instr,
2423              BranchCondition(instr->hydrogen()),
2424              scratch,
2425              Operand(TestType(instr->hydrogen())));
2426 }
2427 
2428 // Branches to a label or falls through with the answer in flags.  Trashes
2429 // the temp registers, but not the input.
EmitClassOfTest(Label * is_true,Label * is_false,Handle<String> class_name,Register input,Register temp,Register temp2)2430 void LCodeGen::EmitClassOfTest(Label* is_true,
2431                                Label* is_false,
2432                                Handle<String>class_name,
2433                                Register input,
2434                                Register temp,
2435                                Register temp2) {
2436   DCHECK(!input.is(temp));
2437   DCHECK(!input.is(temp2));
2438   DCHECK(!temp.is(temp2));
2439 
2440   __ JumpIfSmi(input, is_false);
2441 
2442   __ GetObjectType(input, temp, temp2);
2443   STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2444   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2445     __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
2446   } else {
2447     __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
2448   }
2449 
2450   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2451   // Check if the constructor in the map is a function.
2452   Register instance_type = scratch1();
2453   DCHECK(!instance_type.is(temp));
2454   __ GetMapConstructor(temp, temp, temp2, instance_type);
2455 
2456   // Objects with a non-function constructor have class 'Object'.
2457   if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2458     __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2459   } else {
2460     __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2461   }
2462 
2463   // temp now contains the constructor function. Grab the
2464   // instance class name from there.
2465   __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2466   __ ld(temp, FieldMemOperand(temp,
2467                                SharedFunctionInfo::kInstanceClassNameOffset));
2468   // The class name we are testing against is internalized since it's a literal.
2469   // The name in the constructor is internalized because of the way the context
2470   // is booted.  This routine isn't expected to work for random API-created
2471   // classes and it doesn't have to because you can't access it with natives
2472   // syntax.  Since both sides are internalized it is sufficient to use an
2473   // identity comparison.
2474 
2475   // End with the address of this class_name instance in temp register.
2476   // On MIPS, the caller must do the comparison with Handle<String>class_name.
2477 }
2478 
2479 
DoClassOfTestAndBranch(LClassOfTestAndBranch * instr)2480 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2481   Register input = ToRegister(instr->value());
2482   Register temp = scratch0();
2483   Register temp2 = ToRegister(instr->temp());
2484   Handle<String> class_name = instr->hydrogen()->class_name();
2485 
2486   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2487                   class_name, input, temp, temp2);
2488 
2489   EmitBranch(instr, eq, temp, Operand(class_name));
2490 }
2491 
2492 
DoCmpMapAndBranch(LCmpMapAndBranch * instr)2493 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2494   Register reg = ToRegister(instr->value());
2495   Register temp = ToRegister(instr->temp());
2496 
2497   __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2498   EmitBranch(instr, eq, temp, Operand(instr->map()));
2499 }
2500 
2501 
DoHasInPrototypeChainAndBranch(LHasInPrototypeChainAndBranch * instr)2502 void LCodeGen::DoHasInPrototypeChainAndBranch(
2503     LHasInPrototypeChainAndBranch* instr) {
2504   Register const object = ToRegister(instr->object());
2505   Register const object_map = scratch0();
2506   Register const object_instance_type = scratch1();
2507   Register const object_prototype = object_map;
2508   Register const prototype = ToRegister(instr->prototype());
2509 
2510   // The {object} must be a spec object.  It's sufficient to know that {object}
2511   // is not a smi, since all other non-spec objects have {null} prototypes and
2512   // will be ruled out below.
2513   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2514     __ SmiTst(object, at);
2515     EmitFalseBranch(instr, eq, at, Operand(zero_reg));
2516   }
2517 
2518   // Loop through the {object}s prototype chain looking for the {prototype}.
2519   __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2520   Label loop;
2521   __ bind(&loop);
2522 
2523   // Deoptimize if the object needs to be access checked.
2524   __ lbu(object_instance_type,
2525          FieldMemOperand(object_map, Map::kBitFieldOffset));
2526   __ And(object_instance_type, object_instance_type,
2527          Operand(1 << Map::kIsAccessCheckNeeded));
2528   DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
2529                Operand(zero_reg));
2530   __ lbu(object_instance_type,
2531          FieldMemOperand(object_map, Map::kInstanceTypeOffset));
2532   DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
2533                Operand(JS_PROXY_TYPE));
2534 
2535   __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
2536   __ LoadRoot(at, Heap::kNullValueRootIndex);
2537   EmitFalseBranch(instr, eq, object_prototype, Operand(at));
2538   EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
2539   __ Branch(&loop, USE_DELAY_SLOT);
2540   __ ld(object_map, FieldMemOperand(object_prototype,
2541                                     HeapObject::kMapOffset));  // In delay slot.
2542 }
2543 
2544 
DoCmpT(LCmpT * instr)2545 void LCodeGen::DoCmpT(LCmpT* instr) {
2546   DCHECK(ToRegister(instr->context()).is(cp));
2547   Token::Value op = instr->op();
2548 
2549   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2550   CallCode(ic, RelocInfo::CODE_TARGET, instr);
2551   // On MIPS there is no need for a "no inlined smi code" marker (nop).
2552 
2553   Condition condition = ComputeCompareCondition(op);
2554   // A minor optimization that relies on LoadRoot always emitting one
2555   // instruction.
2556   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2557   Label done, check;
2558   __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2559   __ bind(&check);
2560   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2561   DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2562   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2563   __ bind(&done);
2564 }
2565 
2566 
DoReturn(LReturn * instr)2567 void LCodeGen::DoReturn(LReturn* instr) {
2568   if (FLAG_trace && info()->IsOptimizing()) {
2569     // Push the return value on the stack as the parameter.
2570     // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2571     // managed by the register allocator and tearing down the frame, it's
2572     // safe to write to the context register.
2573     __ push(v0);
2574     __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2575     __ CallRuntime(Runtime::kTraceExit);
2576   }
2577   if (info()->saves_caller_doubles()) {
2578     RestoreCallerDoubles();
2579   }
2580   if (NeedsEagerFrame()) {
2581     __ mov(sp, fp);
2582     __ Pop(ra, fp);
2583   }
2584   if (instr->has_constant_parameter_count()) {
2585     int parameter_count = ToInteger32(instr->constant_parameter_count());
2586     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2587     if (sp_delta != 0) {
2588       __ Daddu(sp, sp, Operand(sp_delta));
2589     }
2590   } else {
2591     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
2592     Register reg = ToRegister(instr->parameter_count());
2593     // The argument count parameter is a smi
2594     __ SmiUntag(reg);
2595     __ Dlsa(sp, sp, reg, kPointerSizeLog2);
2596   }
2597 
2598   __ Jump(ra);
2599 }
2600 
2601 
DoLoadContextSlot(LLoadContextSlot * instr)2602 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2603   Register context = ToRegister(instr->context());
2604   Register result = ToRegister(instr->result());
2605 
2606   __ ld(result, ContextMemOperand(context, instr->slot_index()));
2607   if (instr->hydrogen()->RequiresHoleCheck()) {
2608     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2609 
2610     if (instr->hydrogen()->DeoptimizesOnHole()) {
2611       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
2612     } else {
2613       Label is_not_hole;
2614       __ Branch(&is_not_hole, ne, result, Operand(at));
2615       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2616       __ bind(&is_not_hole);
2617     }
2618   }
2619 }
2620 
2621 
DoStoreContextSlot(LStoreContextSlot * instr)2622 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2623   Register context = ToRegister(instr->context());
2624   Register value = ToRegister(instr->value());
2625   Register scratch = scratch0();
2626   MemOperand target = ContextMemOperand(context, instr->slot_index());
2627 
2628   Label skip_assignment;
2629 
2630   if (instr->hydrogen()->RequiresHoleCheck()) {
2631     __ ld(scratch, target);
2632     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2633 
2634     if (instr->hydrogen()->DeoptimizesOnHole()) {
2635       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
2636     } else {
2637       __ Branch(&skip_assignment, ne, scratch, Operand(at));
2638     }
2639   }
2640 
2641   __ sd(value, target);
2642   if (instr->hydrogen()->NeedsWriteBarrier()) {
2643     SmiCheck check_needed =
2644         instr->hydrogen()->value()->type().IsHeapObject()
2645             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2646     __ RecordWriteContextSlot(context,
2647                               target.offset(),
2648                               value,
2649                               scratch0(),
2650                               GetRAState(),
2651                               kSaveFPRegs,
2652                               EMIT_REMEMBERED_SET,
2653                               check_needed);
2654   }
2655 
2656   __ bind(&skip_assignment);
2657 }
2658 
2659 
DoLoadNamedField(LLoadNamedField * instr)2660 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2661   HObjectAccess access = instr->hydrogen()->access();
2662   int offset = access.offset();
2663   Register object = ToRegister(instr->object());
2664   if (access.IsExternalMemory()) {
2665     Register result = ToRegister(instr->result());
2666     MemOperand operand = MemOperand(object, offset);
2667     __ Load(result, operand, access.representation());
2668     return;
2669   }
2670 
2671   if (instr->hydrogen()->representation().IsDouble()) {
2672     DoubleRegister result = ToDoubleRegister(instr->result());
2673     __ ldc1(result, FieldMemOperand(object, offset));
2674     return;
2675   }
2676 
2677   Register result = ToRegister(instr->result());
2678   if (!access.IsInobject()) {
2679     __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2680     object = result;
2681   }
2682 
2683   Representation representation = access.representation();
2684   if (representation.IsSmi() && SmiValuesAre32Bits() &&
2685       instr->hydrogen()->representation().IsInteger32()) {
2686     if (FLAG_debug_code) {
2687       // Verify this is really an Smi.
2688       Register scratch = scratch0();
2689       __ Load(scratch, FieldMemOperand(object, offset), representation);
2690       __ AssertSmi(scratch);
2691     }
2692 
2693     // Read int value directly from upper half of the smi.
2694     STATIC_ASSERT(kSmiTag == 0);
2695     STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
2696     offset = SmiWordOffset(offset);
2697     representation = Representation::Integer32();
2698   }
2699   __ Load(result, FieldMemOperand(object, offset), representation);
2700 }
2701 
2702 
DoLoadFunctionPrototype(LLoadFunctionPrototype * instr)2703 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2704   Register scratch = scratch0();
2705   Register function = ToRegister(instr->function());
2706   Register result = ToRegister(instr->result());
2707 
2708   // Get the prototype or initial map from the function.
2709   __ ld(result,
2710          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2711 
2712   // Check that the function has a prototype or an initial map.
2713   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2714   DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
2715 
2716   // If the function does not have an initial map, we're done.
2717   Label done;
2718   __ GetObjectType(result, scratch, scratch);
2719   __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2720 
2721   // Get the prototype from the initial map.
2722   __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
2723 
2724   // All done.
2725   __ bind(&done);
2726 }
2727 
2728 
DoLoadRoot(LLoadRoot * instr)2729 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2730   Register result = ToRegister(instr->result());
2731   __ LoadRoot(result, instr->index());
2732 }
2733 
2734 
DoAccessArgumentsAt(LAccessArgumentsAt * instr)2735 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2736   Register arguments = ToRegister(instr->arguments());
2737   Register result = ToRegister(instr->result());
2738   // There are two words between the frame pointer and the last argument.
2739   // Subtracting from length accounts for one of them add one more.
2740   if (instr->length()->IsConstantOperand()) {
2741     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2742     if (instr->index()->IsConstantOperand()) {
2743       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2744       int index = (const_length - const_index) + 1;
2745       __ ld(result, MemOperand(arguments, index * kPointerSize));
2746     } else {
2747       Register index = ToRegister(instr->index());
2748       __ li(at, Operand(const_length + 1));
2749       __ Dsubu(result, at, index);
2750       __ Dlsa(at, arguments, result, kPointerSizeLog2);
2751       __ ld(result, MemOperand(at));
2752     }
2753   } else if (instr->index()->IsConstantOperand()) {
2754     Register length = ToRegister(instr->length());
2755     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2756     int loc = const_index - 1;
2757     if (loc != 0) {
2758       __ Dsubu(result, length, Operand(loc));
2759       __ Dlsa(at, arguments, result, kPointerSizeLog2);
2760       __ ld(result, MemOperand(at));
2761     } else {
2762       __ Dlsa(at, arguments, length, kPointerSizeLog2);
2763       __ ld(result, MemOperand(at));
2764     }
2765   } else {
2766     Register length = ToRegister(instr->length());
2767     Register index = ToRegister(instr->index());
2768     __ Dsubu(result, length, index);
2769     __ Daddu(result, result, 1);
2770     __ Dlsa(at, arguments, result, kPointerSizeLog2);
2771     __ ld(result, MemOperand(at));
2772   }
2773 }
2774 
2775 
DoLoadKeyedExternalArray(LLoadKeyed * instr)2776 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2777   Register external_pointer = ToRegister(instr->elements());
2778   Register key = no_reg;
2779   ElementsKind elements_kind = instr->elements_kind();
2780   bool key_is_constant = instr->key()->IsConstantOperand();
2781   int constant_key = 0;
2782   if (key_is_constant) {
2783     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2784     if (constant_key & 0xF0000000) {
2785       Abort(kArrayIndexConstantValueTooBig);
2786     }
2787   } else {
2788     key = ToRegister(instr->key());
2789   }
2790   int element_size_shift = ElementsKindToShiftSize(elements_kind);
2791   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2792       ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
2793       : element_size_shift;
2794   int base_offset = instr->base_offset();
2795 
2796   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2797     FPURegister result = ToDoubleRegister(instr->result());
2798     if (key_is_constant) {
2799       __ Daddu(scratch0(), external_pointer,
2800           constant_key << element_size_shift);
2801     } else {
2802       if (shift_size < 0) {
2803          if (shift_size == -32) {
2804            __ dsra32(scratch0(), key, 0);
2805          } else {
2806            __ dsra(scratch0(), key, -shift_size);
2807          }
2808       } else {
2809         __ dsll(scratch0(), key, shift_size);
2810       }
2811       __ Daddu(scratch0(), scratch0(), external_pointer);
2812     }
2813     if (elements_kind == FLOAT32_ELEMENTS) {
2814       __ lwc1(result, MemOperand(scratch0(), base_offset));
2815       __ cvt_d_s(result, result);
2816     } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2817       __ ldc1(result, MemOperand(scratch0(), base_offset));
2818     }
2819   } else {
2820     Register result = ToRegister(instr->result());
2821     MemOperand mem_operand = PrepareKeyedOperand(
2822         key, external_pointer, key_is_constant, constant_key,
2823         element_size_shift, shift_size, base_offset);
2824     switch (elements_kind) {
2825       case INT8_ELEMENTS:
2826         __ lb(result, mem_operand);
2827         break;
2828       case UINT8_ELEMENTS:
2829       case UINT8_CLAMPED_ELEMENTS:
2830         __ lbu(result, mem_operand);
2831         break;
2832       case INT16_ELEMENTS:
2833         __ lh(result, mem_operand);
2834         break;
2835       case UINT16_ELEMENTS:
2836         __ lhu(result, mem_operand);
2837         break;
2838       case INT32_ELEMENTS:
2839         __ lw(result, mem_operand);
2840         break;
2841       case UINT32_ELEMENTS:
2842         __ lw(result, mem_operand);
2843         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2844           DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
2845                        result, Operand(0x80000000));
2846         }
2847         break;
2848       case FLOAT32_ELEMENTS:
2849       case FLOAT64_ELEMENTS:
2850       case FAST_DOUBLE_ELEMENTS:
2851       case FAST_ELEMENTS:
2852       case FAST_SMI_ELEMENTS:
2853       case FAST_HOLEY_DOUBLE_ELEMENTS:
2854       case FAST_HOLEY_ELEMENTS:
2855       case FAST_HOLEY_SMI_ELEMENTS:
2856       case DICTIONARY_ELEMENTS:
2857       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2858       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2859       case FAST_STRING_WRAPPER_ELEMENTS:
2860       case SLOW_STRING_WRAPPER_ELEMENTS:
2861       case NO_ELEMENTS:
2862         UNREACHABLE();
2863         break;
2864     }
2865   }
2866 }
2867 
2868 
DoLoadKeyedFixedDoubleArray(LLoadKeyed * instr)2869 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2870   Register elements = ToRegister(instr->elements());
2871   bool key_is_constant = instr->key()->IsConstantOperand();
2872   Register key = no_reg;
2873   DoubleRegister result = ToDoubleRegister(instr->result());
2874   Register scratch = scratch0();
2875 
2876   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2877 
2878   int base_offset = instr->base_offset();
2879   if (key_is_constant) {
2880     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2881     if (constant_key & 0xF0000000) {
2882       Abort(kArrayIndexConstantValueTooBig);
2883     }
2884     base_offset += constant_key * kDoubleSize;
2885   }
2886   __ Daddu(scratch, elements, Operand(base_offset));
2887 
2888   if (!key_is_constant) {
2889     key = ToRegister(instr->key());
2890     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2891         ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
2892         : element_size_shift;
2893     if (shift_size > 0) {
2894       __ dsll(at, key, shift_size);
2895     } else if (shift_size == -32) {
2896       __ dsra32(at, key, 0);
2897     } else {
2898       __ dsra(at, key, -shift_size);
2899     }
2900     __ Daddu(scratch, scratch, at);
2901   }
2902 
2903   __ ldc1(result, MemOperand(scratch));
2904 
2905   if (instr->hydrogen()->RequiresHoleCheck()) {
2906     __ FmoveHigh(scratch, result);
2907     DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
2908                  Operand(static_cast<int32_t>(kHoleNanUpper32)));
2909   }
2910 }
2911 
2912 
DoLoadKeyedFixedArray(LLoadKeyed * instr)2913 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2914   HLoadKeyed* hinstr = instr->hydrogen();
2915   Register elements = ToRegister(instr->elements());
2916   Register result = ToRegister(instr->result());
2917   Register scratch = scratch0();
2918   Register store_base = scratch;
2919   int offset = instr->base_offset();
2920 
2921   if (instr->key()->IsConstantOperand()) {
2922     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2923     offset += ToInteger32(const_operand) * kPointerSize;
2924     store_base = elements;
2925   } else {
2926     Register key = ToRegister(instr->key());
2927     // Even though the HLoadKeyed instruction forces the input
2928     // representation for the key to be an integer, the input gets replaced
2929     // during bound check elimination with the index argument to the bounds
2930     // check, which can be tagged, so that case must be handled here, too.
2931     if (instr->hydrogen()->key()->representation().IsSmi()) {
2932     __ SmiScale(scratch, key, kPointerSizeLog2);
2933     __ daddu(scratch, elements, scratch);
2934     } else {
2935       __ Dlsa(scratch, elements, key, kPointerSizeLog2);
2936     }
2937   }
2938 
2939   Representation representation = hinstr->representation();
2940   if (representation.IsInteger32() && SmiValuesAre32Bits() &&
2941       hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
2942     DCHECK(!hinstr->RequiresHoleCheck());
2943     if (FLAG_debug_code) {
2944       Register temp = scratch1();
2945       __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
2946       __ AssertSmi(temp);
2947     }
2948 
2949     // Read int value directly from upper half of the smi.
2950     STATIC_ASSERT(kSmiTag == 0);
2951     STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
2952     offset = SmiWordOffset(offset);
2953   }
2954 
2955   __ Load(result, MemOperand(store_base, offset), representation);
2956 
2957   // Check for the hole value.
2958   if (hinstr->RequiresHoleCheck()) {
2959     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2960       __ SmiTst(result, scratch);
2961       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
2962                    Operand(zero_reg));
2963     } else {
2964       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2965       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
2966                    Operand(scratch));
2967     }
2968   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
2969     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
2970     Label done;
2971     __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2972     __ Branch(&done, ne, result, Operand(scratch));
2973     if (info()->IsStub()) {
2974       // A stub can safely convert the hole to undefined only if the array
2975       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
2976       // it needs to bail out.
2977       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
2978       // The comparison only needs LS bits of value, which is a smi.
2979       __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
2980       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
2981                    Operand(Smi::FromInt(Isolate::kProtectorValid)));
2982     }
2983     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2984     __ bind(&done);
2985   }
2986 }
2987 
2988 
DoLoadKeyed(LLoadKeyed * instr)2989 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
2990   if (instr->is_fixed_typed_array()) {
2991     DoLoadKeyedExternalArray(instr);
2992   } else if (instr->hydrogen()->representation().IsDouble()) {
2993     DoLoadKeyedFixedDoubleArray(instr);
2994   } else {
2995     DoLoadKeyedFixedArray(instr);
2996   }
2997 }
2998 
2999 
PrepareKeyedOperand(Register key,Register base,bool key_is_constant,int constant_key,int element_size,int shift_size,int base_offset)3000 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3001                                          Register base,
3002                                          bool key_is_constant,
3003                                          int constant_key,
3004                                          int element_size,
3005                                          int shift_size,
3006                                          int base_offset) {
3007   if (key_is_constant) {
3008     return MemOperand(base, (constant_key << element_size) + base_offset);
3009   }
3010 
3011   if (base_offset == 0) {
3012     if (shift_size >= 0) {
3013       __ dsll(scratch0(), key, shift_size);
3014       __ Daddu(scratch0(), base, scratch0());
3015       return MemOperand(scratch0());
3016     } else {
3017       if (shift_size == -32) {
3018         __ dsra32(scratch0(), key, 0);
3019       } else {
3020         __ dsra(scratch0(), key, -shift_size);
3021       }
3022       __ Daddu(scratch0(), base, scratch0());
3023       return MemOperand(scratch0());
3024     }
3025   }
3026 
3027   if (shift_size >= 0) {
3028     __ dsll(scratch0(), key, shift_size);
3029     __ Daddu(scratch0(), base, scratch0());
3030     return MemOperand(scratch0(), base_offset);
3031   } else {
3032     if (shift_size == -32) {
3033        __ dsra32(scratch0(), key, 0);
3034     } else {
3035       __ dsra(scratch0(), key, -shift_size);
3036     }
3037     __ Daddu(scratch0(), base, scratch0());
3038     return MemOperand(scratch0(), base_offset);
3039   }
3040 }
3041 
3042 
DoArgumentsElements(LArgumentsElements * instr)3043 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3044   Register scratch = scratch0();
3045   Register temp = scratch1();
3046   Register result = ToRegister(instr->result());
3047 
3048   if (instr->hydrogen()->from_inlined()) {
3049     __ Dsubu(result, sp, 2 * kPointerSize);
3050   } else if (instr->hydrogen()->arguments_adaptor()) {
3051     // Check if the calling frame is an arguments adaptor frame.
3052     Label done, adapted;
3053     __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3054     __ ld(result,
3055           MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
3056     __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3057 
3058     // Result is the frame pointer for the frame if not adapted and for the real
3059     // frame below the adaptor frame if adapted.
3060     __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
3061     __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
3062   } else {
3063     __ mov(result, fp);
3064   }
3065 }
3066 
3067 
DoArgumentsLength(LArgumentsLength * instr)3068 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3069   Register elem = ToRegister(instr->elements());
3070   Register result = ToRegister(instr->result());
3071 
3072   Label done;
3073 
3074   // If no arguments adaptor frame the number of arguments is fixed.
3075   __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
3076   __ Branch(&done, eq, fp, Operand(elem));
3077 
3078   // Arguments adaptor frame present. Get argument length from there.
3079   __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3080   __ ld(result,
3081         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3082   __ SmiUntag(result);
3083 
3084   // Argument length is in result register.
3085   __ bind(&done);
3086 }
3087 
3088 
DoWrapReceiver(LWrapReceiver * instr)3089 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3090   Register receiver = ToRegister(instr->receiver());
3091   Register function = ToRegister(instr->function());
3092   Register result = ToRegister(instr->result());
3093   Register scratch = scratch0();
3094 
3095   // If the receiver is null or undefined, we have to pass the global
3096   // object as a receiver to normal functions. Values have to be
3097   // passed unchanged to builtins and strict-mode functions.
3098   Label global_object, result_in_receiver;
3099 
3100   if (!instr->hydrogen()->known_function()) {
3101     // Do not transform the receiver to object for strict mode functions.
3102     __ ld(scratch,
3103            FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3104 
3105     // Do not transform the receiver to object for builtins.
3106     int32_t strict_mode_function_mask =
3107         1 <<  SharedFunctionInfo::kStrictModeBitWithinByte;
3108     int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
3109 
3110     __ lbu(at,
3111            FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
3112     __ And(at, at, Operand(strict_mode_function_mask));
3113     __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3114     __ lbu(at,
3115            FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
3116     __ And(at, at, Operand(native_mask));
3117     __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3118   }
3119 
3120   // Normal function. Replace undefined or null with global receiver.
3121   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3122   __ Branch(&global_object, eq, receiver, Operand(scratch));
3123   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3124   __ Branch(&global_object, eq, receiver, Operand(scratch));
3125 
3126   // Deoptimize if the receiver is not a JS object.
3127   __ SmiTst(receiver, scratch);
3128   DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));
3129 
3130   __ GetObjectType(receiver, scratch, scratch);
3131   DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
3132                Operand(FIRST_JS_RECEIVER_TYPE));
3133   __ Branch(&result_in_receiver);
3134 
3135   __ bind(&global_object);
3136   __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
3137   __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3138   __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3139 
3140   if (result.is(receiver)) {
3141     __ bind(&result_in_receiver);
3142   } else {
3143     Label result_ok;
3144     __ Branch(&result_ok);
3145     __ bind(&result_in_receiver);
3146     __ mov(result, receiver);
3147     __ bind(&result_ok);
3148   }
3149 }
3150 
3151 
DoApplyArguments(LApplyArguments * instr)3152 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3153   Register receiver = ToRegister(instr->receiver());
3154   Register function = ToRegister(instr->function());
3155   Register length = ToRegister(instr->length());
3156   Register elements = ToRegister(instr->elements());
3157   Register scratch = scratch0();
3158   DCHECK(receiver.is(a0));  // Used for parameter count.
3159   DCHECK(function.is(a1));  // Required by InvokeFunction.
3160   DCHECK(ToRegister(instr->result()).is(v0));
3161 
3162   // Copy the arguments to this function possibly from the
3163   // adaptor frame below it.
3164   const uint32_t kArgumentsLimit = 1 * KB;
3165   DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
3166                Operand(kArgumentsLimit));
3167 
3168   // Push the receiver and use the register to keep the original
3169   // number of arguments.
3170   __ push(receiver);
3171   __ Move(receiver, length);
3172   // The arguments are at a one pointer size offset from elements.
3173   __ Daddu(elements, elements, Operand(1 * kPointerSize));
3174 
3175   // Loop through the arguments pushing them onto the execution
3176   // stack.
3177   Label invoke, loop;
3178   // length is a small non-negative integer, due to the test above.
3179   __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3180   __ dsll(scratch, length, kPointerSizeLog2);
3181   __ bind(&loop);
3182   __ Daddu(scratch, elements, scratch);
3183   __ ld(scratch, MemOperand(scratch));
3184   __ push(scratch);
3185   __ Dsubu(length, length, Operand(1));
3186   __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3187   __ dsll(scratch, length, kPointerSizeLog2);
3188 
3189   __ bind(&invoke);
3190 
3191   InvokeFlag flag = CALL_FUNCTION;
3192   if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
3193     DCHECK(!info()->saves_caller_doubles());
3194     // TODO(ishell): drop current frame before pushing arguments to the stack.
3195     flag = JUMP_FUNCTION;
3196     ParameterCount actual(a0);
3197     // It is safe to use t0, t1 and t2 as scratch registers here given that
3198     // we are not going to return to caller function anyway.
3199     PrepareForTailCall(actual, t0, t1, t2);
3200   }
3201 
3202   DCHECK(instr->HasPointerMap());
3203   LPointerMap* pointers = instr->pointer_map();
3204   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3205   // The number of arguments is stored in receiver which is a0, as expected
3206   // by InvokeFunction.
3207   ParameterCount actual(receiver);
3208   __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
3209 }
3210 
3211 
DoPushArgument(LPushArgument * instr)3212 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3213   LOperand* argument = instr->value();
3214   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3215     Abort(kDoPushArgumentNotImplementedForDoubleType);
3216   } else {
3217     Register argument_reg = EmitLoadRegister(argument, at);
3218     __ push(argument_reg);
3219   }
3220 }
3221 
3222 
DoDrop(LDrop * instr)3223 void LCodeGen::DoDrop(LDrop* instr) {
3224   __ Drop(instr->count());
3225 }
3226 
3227 
DoThisFunction(LThisFunction * instr)3228 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3229   Register result = ToRegister(instr->result());
3230   __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3231 }
3232 
3233 
DoContext(LContext * instr)3234 void LCodeGen::DoContext(LContext* instr) {
3235   // If there is a non-return use, the context must be moved to a register.
3236   Register result = ToRegister(instr->result());
3237   if (info()->IsOptimizing()) {
3238     __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3239   } else {
3240     // If there is no frame, the context must be in cp.
3241     DCHECK(result.is(cp));
3242   }
3243 }
3244 
3245 
DoDeclareGlobals(LDeclareGlobals * instr)3246 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3247   DCHECK(ToRegister(instr->context()).is(cp));
3248   __ li(scratch0(), instr->hydrogen()->pairs());
3249   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3250   __ Push(scratch0(), scratch1());
3251   __ li(scratch0(), instr->hydrogen()->feedback_vector());
3252   __ Push(scratch0());
3253   CallRuntime(Runtime::kDeclareGlobals, instr);
3254 }
3255 
CallKnownFunction(Handle<JSFunction> function,int formal_parameter_count,int arity,bool is_tail_call,LInstruction * instr)3256 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3257                                  int formal_parameter_count, int arity,
3258                                  bool is_tail_call, LInstruction* instr) {
3259   bool dont_adapt_arguments =
3260       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3261   bool can_invoke_directly =
3262       dont_adapt_arguments || formal_parameter_count == arity;
3263 
3264   Register function_reg = a1;
3265   LPointerMap* pointers = instr->pointer_map();
3266 
3267   if (can_invoke_directly) {
3268     // Change context.
3269     __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3270 
3271     // Always initialize new target and number of actual arguments.
3272     __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
3273     __ li(a0, Operand(arity));
3274 
3275     bool is_self_call = function.is_identical_to(info()->closure());
3276 
3277     // Invoke function.
3278     if (is_self_call) {
3279       Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
3280       if (is_tail_call) {
3281         __ Jump(self, RelocInfo::CODE_TARGET);
3282       } else {
3283         __ Call(self, RelocInfo::CODE_TARGET);
3284       }
3285     } else {
3286       __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3287       if (is_tail_call) {
3288         __ Jump(at);
3289       } else {
3290         __ Call(at);
3291       }
3292     }
3293 
3294     if (!is_tail_call) {
3295       // Set up deoptimization.
3296       RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3297     }
3298   } else {
3299     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3300     ParameterCount actual(arity);
3301     ParameterCount expected(formal_parameter_count);
3302     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3303     __ InvokeFunction(function_reg, expected, actual, flag, generator);
3304   }
3305 }
3306 
3307 
DoDeferredMathAbsTaggedHeapNumber(LMathAbs * instr)3308 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3309   DCHECK(instr->context() != NULL);
3310   DCHECK(ToRegister(instr->context()).is(cp));
3311   Register input = ToRegister(instr->value());
3312   Register result = ToRegister(instr->result());
3313   Register scratch = scratch0();
3314 
3315   // Deoptimize if not a heap number.
3316   __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3317   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3318   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
3319                Operand(at));
3320 
3321   Label done;
3322   Register exponent = scratch0();
3323   scratch = no_reg;
3324   __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3325   // Check the sign of the argument. If the argument is positive, just
3326   // return it.
3327   __ Move(result, input);
3328   __ And(at, exponent, Operand(HeapNumber::kSignMask));
3329   __ Branch(&done, eq, at, Operand(zero_reg));
3330 
3331   // Input is negative. Reverse its sign.
3332   // Preserve the value of all registers.
3333   {
3334     PushSafepointRegistersScope scope(this);
3335 
3336     // Registers were saved at the safepoint, so we can use
3337     // many scratch registers.
3338     Register tmp1 = input.is(a1) ? a0 : a1;
3339     Register tmp2 = input.is(a2) ? a0 : a2;
3340     Register tmp3 = input.is(a3) ? a0 : a3;
3341     Register tmp4 = input.is(a4) ? a0 : a4;
3342 
3343     // exponent: floating point exponent value.
3344 
3345     Label allocated, slow;
3346     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3347     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3348     __ Branch(&allocated);
3349 
3350     // Slow case: Call the runtime system to do the number allocation.
3351     __ bind(&slow);
3352 
3353     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3354                             instr->context());
3355     // Set the pointer to the new heap number in tmp.
3356     if (!tmp1.is(v0))
3357       __ mov(tmp1, v0);
3358     // Restore input_reg after call to runtime.
3359     __ LoadFromSafepointRegisterSlot(input, input);
3360     __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3361 
3362     __ bind(&allocated);
3363     // exponent: floating point exponent value.
3364     // tmp1: allocated heap number.
3365     __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3366     __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3367     __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3368     __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3369 
3370     __ StoreToSafepointRegisterSlot(tmp1, result);
3371   }
3372 
3373   __ bind(&done);
3374 }
3375 
3376 
EmitIntegerMathAbs(LMathAbs * instr)3377 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3378   Register input = ToRegister(instr->value());
3379   Register result = ToRegister(instr->result());
3380   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3381   Label done;
3382   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3383   __ mov(result, input);
3384   __ subu(result, zero_reg, input);
3385   // Overflow if result is still negative, i.e. 0x80000000.
3386   DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
3387                Operand(zero_reg));
3388   __ bind(&done);
3389 }
3390 
3391 
EmitSmiMathAbs(LMathAbs * instr)3392 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3393   Register input = ToRegister(instr->value());
3394   Register result = ToRegister(instr->result());
3395   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3396   Label done;
3397   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3398   __ mov(result, input);
3399   __ dsubu(result, zero_reg, input);
3400   // Overflow if result is still negative, i.e. 0x80000000 00000000.
3401   DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
3402                Operand(zero_reg));
3403   __ bind(&done);
3404 }
3405 
3406 
DoMathAbs(LMathAbs * instr)3407 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3408   // Class for deferred case.
3409   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3410    public:
3411     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3412         : LDeferredCode(codegen), instr_(instr) { }
3413     void Generate() override {
3414       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3415     }
3416     LInstruction* instr() override { return instr_; }
3417 
3418    private:
3419     LMathAbs* instr_;
3420   };
3421 
3422   Representation r = instr->hydrogen()->value()->representation();
3423   if (r.IsDouble()) {
3424     FPURegister input = ToDoubleRegister(instr->value());
3425     FPURegister result = ToDoubleRegister(instr->result());
3426     __ abs_d(result, input);
3427   } else if (r.IsInteger32()) {
3428     EmitIntegerMathAbs(instr);
3429   } else if (r.IsSmi()) {
3430     EmitSmiMathAbs(instr);
3431   } else {
3432     // Representation is tagged.
3433     DeferredMathAbsTaggedHeapNumber* deferred =
3434         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3435     Register input = ToRegister(instr->value());
3436     // Smi check.
3437     __ JumpIfNotSmi(input, deferred->entry());
3438     // If smi, handle it directly.
3439     EmitSmiMathAbs(instr);
3440     __ bind(deferred->exit());
3441   }
3442 }
3443 
3444 
DoMathFloor(LMathFloor * instr)3445 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3446   DoubleRegister input = ToDoubleRegister(instr->value());
3447   Register result = ToRegister(instr->result());
3448   Register scratch1 = scratch0();
3449   Register except_flag = ToRegister(instr->temp());
3450 
3451   __ EmitFPUTruncate(kRoundToMinusInf,
3452                      result,
3453                      input,
3454                      scratch1,
3455                      double_scratch0(),
3456                      except_flag);
3457 
3458   // Deopt if the operation did not succeed.
3459   DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
3460                Operand(zero_reg));
3461 
3462   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3463     // Test for -0.
3464     Label done;
3465     __ Branch(&done, ne, result, Operand(zero_reg));
3466     __ mfhc1(scratch1, input);  // Get exponent/sign bits.
3467     __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3468     DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
3469                  Operand(zero_reg));
3470     __ bind(&done);
3471   }
3472 }
3473 
3474 
DoMathRound(LMathRound * instr)3475 void LCodeGen::DoMathRound(LMathRound* instr) {
3476   DoubleRegister input = ToDoubleRegister(instr->value());
3477   Register result = ToRegister(instr->result());
3478   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3479   Register scratch = scratch0();
3480   Label done, check_sign_on_zero;
3481 
3482   // Extract exponent bits.
3483   __ mfhc1(result, input);
3484   __ Ext(scratch,
3485          result,
3486          HeapNumber::kExponentShift,
3487          HeapNumber::kExponentBits);
3488 
3489   // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3490   Label skip1;
3491   __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3492   __ mov(result, zero_reg);
3493   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3494     __ Branch(&check_sign_on_zero);
3495   } else {
3496     __ Branch(&done);
3497   }
3498   __ bind(&skip1);
3499 
3500   // The following conversion will not work with numbers
3501   // outside of ]-2^32, 2^32[.
3502   DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
3503                Operand(HeapNumber::kExponentBias + 32));
3504 
3505   // Save the original sign for later comparison.
3506   __ And(scratch, result, Operand(HeapNumber::kSignMask));
3507 
3508   __ Move(double_scratch0(), 0.5);
3509   __ add_d(double_scratch0(), input, double_scratch0());
3510 
3511   // Check sign of the result: if the sign changed, the input
3512   // value was in ]0.5, 0[ and the result should be -0.
3513   __ mfhc1(result, double_scratch0());
3514   // mfhc1 sign-extends, clear the upper bits.
3515   __ dsll32(result, result, 0);
3516   __ dsrl32(result, result, 0);
3517   __ Xor(result, result, Operand(scratch));
3518   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3519     // ARM uses 'mi' here, which is 'lt'
3520     DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
3521                  Operand(zero_reg));
3522   } else {
3523     Label skip2;
3524     // ARM uses 'mi' here, which is 'lt'
3525     // Negating it results in 'ge'
3526     __ Branch(&skip2, ge, result, Operand(zero_reg));
3527     __ mov(result, zero_reg);
3528     __ Branch(&done);
3529     __ bind(&skip2);
3530   }
3531 
3532   Register except_flag = scratch;
3533   __ EmitFPUTruncate(kRoundToMinusInf,
3534                      result,
3535                      double_scratch0(),
3536                      at,
3537                      double_scratch1,
3538                      except_flag);
3539 
3540   DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
3541                Operand(zero_reg));
3542 
3543   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3544     // Test for -0.
3545     __ Branch(&done, ne, result, Operand(zero_reg));
3546     __ bind(&check_sign_on_zero);
3547     __ mfhc1(scratch, input);  // Get exponent/sign bits.
3548     __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3549     DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
3550                  Operand(zero_reg));
3551   }
3552   __ bind(&done);
3553 }
3554 
3555 
DoMathFround(LMathFround * instr)3556 void LCodeGen::DoMathFround(LMathFround* instr) {
3557   DoubleRegister input = ToDoubleRegister(instr->value());
3558   DoubleRegister result = ToDoubleRegister(instr->result());
3559   __ cvt_s_d(result, input);
3560   __ cvt_d_s(result, result);
3561 }
3562 
3563 
DoMathSqrt(LMathSqrt * instr)3564 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3565   DoubleRegister input = ToDoubleRegister(instr->value());
3566   DoubleRegister result = ToDoubleRegister(instr->result());
3567   __ sqrt_d(result, input);
3568 }
3569 
3570 
DoMathPowHalf(LMathPowHalf * instr)3571 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3572   DoubleRegister input = ToDoubleRegister(instr->value());
3573   DoubleRegister result = ToDoubleRegister(instr->result());
3574   DoubleRegister temp = ToDoubleRegister(instr->temp());
3575 
3576   DCHECK(!input.is(result));
3577 
3578   // Note that according to ECMA-262 15.8.2.13:
3579   // Math.pow(-Infinity, 0.5) == Infinity
3580   // Math.sqrt(-Infinity) == NaN
3581   Label done;
3582   __ Move(temp, static_cast<double>(-V8_INFINITY));
3583   // Set up Infinity.
3584   __ Neg_d(result, temp);
3585   // result is overwritten if the branch is not taken.
3586   __ BranchF(&done, NULL, eq, temp, input);
3587 
3588   // Add +0 to convert -0 to +0.
3589   __ add_d(result, input, kDoubleRegZero);
3590   __ sqrt_d(result, result);
3591   __ bind(&done);
3592 }
3593 
3594 
DoPower(LPower * instr)3595 void LCodeGen::DoPower(LPower* instr) {
3596   Representation exponent_type = instr->hydrogen()->right()->representation();
3597   // Having marked this as a call, we can use any registers.
3598   // Just make sure that the input/output registers are the expected ones.
3599   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3600   DCHECK(!instr->right()->IsDoubleRegister() ||
3601          ToDoubleRegister(instr->right()).is(f4));
3602   DCHECK(!instr->right()->IsRegister() ||
3603          ToRegister(instr->right()).is(tagged_exponent));
3604   DCHECK(ToDoubleRegister(instr->left()).is(f2));
3605   DCHECK(ToDoubleRegister(instr->result()).is(f0));
3606 
3607   if (exponent_type.IsSmi()) {
3608     MathPowStub stub(isolate(), MathPowStub::TAGGED);
3609     __ CallStub(&stub);
3610   } else if (exponent_type.IsTagged()) {
3611     Label no_deopt;
3612     __ JumpIfSmi(tagged_exponent, &no_deopt);
3613     DCHECK(!a7.is(tagged_exponent));
3614     __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3615     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3616     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, a7, Operand(at));
3617     __ bind(&no_deopt);
3618     MathPowStub stub(isolate(), MathPowStub::TAGGED);
3619     __ CallStub(&stub);
3620   } else if (exponent_type.IsInteger32()) {
3621     MathPowStub stub(isolate(), MathPowStub::INTEGER);
3622     __ CallStub(&stub);
3623   } else {
3624     DCHECK(exponent_type.IsDouble());
3625     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3626     __ CallStub(&stub);
3627   }
3628 }
3629 
DoMathCos(LMathCos * instr)3630 void LCodeGen::DoMathCos(LMathCos* instr) {
3631   __ PrepareCallCFunction(0, 1, scratch0());
3632   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3633   __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
3634   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3635 }
3636 
DoMathSin(LMathSin * instr)3637 void LCodeGen::DoMathSin(LMathSin* instr) {
3638   __ PrepareCallCFunction(0, 1, scratch0());
3639   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3640   __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
3641   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3642 }
3643 
DoMathExp(LMathExp * instr)3644 void LCodeGen::DoMathExp(LMathExp* instr) {
3645   __ PrepareCallCFunction(0, 1, scratch0());
3646   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3647   __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
3648   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3649 }
3650 
3651 
DoMathLog(LMathLog * instr)3652 void LCodeGen::DoMathLog(LMathLog* instr) {
3653   __ PrepareCallCFunction(0, 1, scratch0());
3654   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3655   __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
3656   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3657 }
3658 
3659 
DoMathClz32(LMathClz32 * instr)3660 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3661   Register input = ToRegister(instr->value());
3662   Register result = ToRegister(instr->result());
3663   __ Clz(result, input);
3664 }
3665 
PrepareForTailCall(const ParameterCount & actual,Register scratch1,Register scratch2,Register scratch3)3666 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
3667                                   Register scratch1, Register scratch2,
3668                                   Register scratch3) {
3669 #if DEBUG
3670   if (actual.is_reg()) {
3671     DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
3672   } else {
3673     DCHECK(!AreAliased(scratch1, scratch2, scratch3));
3674   }
3675 #endif
3676   if (FLAG_code_comments) {
3677     if (actual.is_reg()) {
3678       Comment(";;; PrepareForTailCall, actual: %s {",
3679               RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
3680                   actual.reg().code()));
3681     } else {
3682       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
3683     }
3684   }
3685 
3686   // Check if next frame is an arguments adaptor frame.
3687   Register caller_args_count_reg = scratch1;
3688   Label no_arguments_adaptor, formal_parameter_count_loaded;
3689   __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3690   __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
3691   __ Branch(&no_arguments_adaptor, ne, scratch3,
3692             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3693 
3694   // Drop current frame and load arguments count from arguments adaptor frame.
3695   __ mov(fp, scratch2);
3696   __ ld(caller_args_count_reg,
3697         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
3698   __ SmiUntag(caller_args_count_reg);
3699   __ Branch(&formal_parameter_count_loaded);
3700 
3701   __ bind(&no_arguments_adaptor);
3702   // Load caller's formal parameter count
3703   __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
3704 
3705   __ bind(&formal_parameter_count_loaded);
3706   __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
3707 
3708   Comment(";;; }");
3709 }
3710 
DoInvokeFunction(LInvokeFunction * instr)3711 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3712   HInvokeFunction* hinstr = instr->hydrogen();
3713   DCHECK(ToRegister(instr->context()).is(cp));
3714   DCHECK(ToRegister(instr->function()).is(a1));
3715   DCHECK(instr->HasPointerMap());
3716 
3717   bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
3718 
3719   if (is_tail_call) {
3720     DCHECK(!info()->saves_caller_doubles());
3721     ParameterCount actual(instr->arity());
3722     // It is safe to use t0, t1 and t2 as scratch registers here given that
3723     // we are not going to return to caller function anyway.
3724     PrepareForTailCall(actual, t0, t1, t2);
3725   }
3726 
3727   Handle<JSFunction> known_function = hinstr->known_function();
3728   if (known_function.is_null()) {
3729     LPointerMap* pointers = instr->pointer_map();
3730     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3731     ParameterCount actual(instr->arity());
3732     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3733     __ InvokeFunction(a1, no_reg, actual, flag, generator);
3734   } else {
3735     CallKnownFunction(known_function, hinstr->formal_parameter_count(),
3736                       instr->arity(), is_tail_call, instr);
3737   }
3738 }
3739 
3740 
DoCallWithDescriptor(LCallWithDescriptor * instr)3741 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3742   DCHECK(ToRegister(instr->result()).is(v0));
3743 
3744   if (instr->hydrogen()->IsTailCall()) {
3745     if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3746 
3747     if (instr->target()->IsConstantOperand()) {
3748       LConstantOperand* target = LConstantOperand::cast(instr->target());
3749       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3750       __ Jump(code, RelocInfo::CODE_TARGET);
3751     } else {
3752       DCHECK(instr->target()->IsRegister());
3753       Register target = ToRegister(instr->target());
3754       __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3755       __ Jump(target);
3756     }
3757   } else {
3758     LPointerMap* pointers = instr->pointer_map();
3759     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3760 
3761     if (instr->target()->IsConstantOperand()) {
3762       LConstantOperand* target = LConstantOperand::cast(instr->target());
3763       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3764       generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3765       __ Call(code, RelocInfo::CODE_TARGET);
3766     } else {
3767       DCHECK(instr->target()->IsRegister());
3768       Register target = ToRegister(instr->target());
3769       generator.BeforeCall(__ CallSize(target));
3770       __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3771       __ Call(target);
3772     }
3773     generator.AfterCall();
3774   }
3775 }
3776 
3777 
DoCallNewArray(LCallNewArray * instr)3778 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3779   DCHECK(ToRegister(instr->context()).is(cp));
3780   DCHECK(ToRegister(instr->constructor()).is(a1));
3781   DCHECK(ToRegister(instr->result()).is(v0));
3782 
3783   __ li(a0, Operand(instr->arity()));
3784   __ li(a2, instr->hydrogen()->site());
3785 
3786   ElementsKind kind = instr->hydrogen()->elements_kind();
3787   AllocationSiteOverrideMode override_mode =
3788       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3789           ? DISABLE_ALLOCATION_SITES
3790           : DONT_OVERRIDE;
3791 
3792   if (instr->arity() == 0) {
3793     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3794     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3795   } else if (instr->arity() == 1) {
3796     Label done;
3797     if (IsFastPackedElementsKind(kind)) {
3798       Label packed_case;
3799       // We might need a change here,
3800       // look at the first argument.
3801       __ ld(a5, MemOperand(sp, 0));
3802       __ Branch(&packed_case, eq, a5, Operand(zero_reg));
3803 
3804       ElementsKind holey_kind = GetHoleyElementsKind(kind);
3805       ArraySingleArgumentConstructorStub stub(isolate(),
3806                                               holey_kind,
3807                                               override_mode);
3808       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3809       __ jmp(&done);
3810       __ bind(&packed_case);
3811     }
3812 
3813     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3814     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3815     __ bind(&done);
3816   } else {
3817     ArrayNArgumentsConstructorStub stub(isolate());
3818     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3819   }
3820 }
3821 
3822 
DoCallRuntime(LCallRuntime * instr)3823 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3824   CallRuntime(instr->function(), instr->arity(), instr);
3825 }
3826 
3827 
DoStoreCodeEntry(LStoreCodeEntry * instr)3828 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3829   Register function = ToRegister(instr->function());
3830   Register code_object = ToRegister(instr->code_object());
3831   __ Daddu(code_object, code_object,
3832           Operand(Code::kHeaderSize - kHeapObjectTag));
3833   __ sd(code_object,
3834         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
3835 }
3836 
3837 
DoInnerAllocatedObject(LInnerAllocatedObject * instr)3838 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3839   Register result = ToRegister(instr->result());
3840   Register base = ToRegister(instr->base_object());
3841   if (instr->offset()->IsConstantOperand()) {
3842     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3843     __ Daddu(result, base, Operand(ToInteger32(offset)));
3844   } else {
3845     Register offset = ToRegister(instr->offset());
3846     __ Daddu(result, base, offset);
3847   }
3848 }
3849 
3850 
DoStoreNamedField(LStoreNamedField * instr)3851 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3852   Representation representation = instr->representation();
3853 
3854   Register object = ToRegister(instr->object());
3855   Register scratch2 = scratch1();
3856   Register scratch1 = scratch0();
3857 
3858   HObjectAccess access = instr->hydrogen()->access();
3859   int offset = access.offset();
3860   if (access.IsExternalMemory()) {
3861     Register value = ToRegister(instr->value());
3862     MemOperand operand = MemOperand(object, offset);
3863     __ Store(value, operand, representation);
3864     return;
3865   }
3866 
3867   __ AssertNotSmi(object);
3868 
3869   DCHECK(!representation.IsSmi() ||
3870          !instr->value()->IsConstantOperand() ||
3871          IsSmi(LConstantOperand::cast(instr->value())));
3872   if (!FLAG_unbox_double_fields && representation.IsDouble()) {
3873     DCHECK(access.IsInobject());
3874     DCHECK(!instr->hydrogen()->has_transition());
3875     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3876     DoubleRegister value = ToDoubleRegister(instr->value());
3877     __ sdc1(value, FieldMemOperand(object, offset));
3878     return;
3879   }
3880 
3881   if (instr->hydrogen()->has_transition()) {
3882     Handle<Map> transition = instr->hydrogen()->transition_map();
3883     AddDeprecationDependency(transition);
3884     __ li(scratch1, Operand(transition));
3885     __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
3886     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3887       Register temp = ToRegister(instr->temp());
3888       // Update the write barrier for the map field.
3889       __ RecordWriteForMap(object,
3890                            scratch1,
3891                            temp,
3892                            GetRAState(),
3893                            kSaveFPRegs);
3894     }
3895   }
3896 
3897   // Do the store.
3898   Register destination = object;
3899   if (!access.IsInobject()) {
3900        destination = scratch1;
3901     __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
3902   }
3903 
3904   if (representation.IsSmi() && SmiValuesAre32Bits() &&
3905       instr->hydrogen()->value()->representation().IsInteger32()) {
3906     DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
3907     if (FLAG_debug_code) {
3908       __ Load(scratch2, FieldMemOperand(destination, offset), representation);
3909       __ AssertSmi(scratch2);
3910     }
3911     // Store int value directly to upper half of the smi.
3912     offset = SmiWordOffset(offset);
3913     representation = Representation::Integer32();
3914   }
3915   MemOperand operand = FieldMemOperand(destination, offset);
3916 
3917   if (FLAG_unbox_double_fields && representation.IsDouble()) {
3918     DCHECK(access.IsInobject());
3919     DoubleRegister value = ToDoubleRegister(instr->value());
3920     __ sdc1(value, operand);
3921   } else {
3922     DCHECK(instr->value()->IsRegister());
3923     Register value = ToRegister(instr->value());
3924     __ Store(value, operand, representation);
3925   }
3926 
3927   if (instr->hydrogen()->NeedsWriteBarrier()) {
3928     // Update the write barrier for the object for in-object properties.
3929     Register value = ToRegister(instr->value());
3930     __ RecordWriteField(destination,
3931                         offset,
3932                         value,
3933                         scratch2,
3934                         GetRAState(),
3935                         kSaveFPRegs,
3936                         EMIT_REMEMBERED_SET,
3937                         instr->hydrogen()->SmiCheckForWriteBarrier(),
3938                         instr->hydrogen()->PointersToHereCheckForValue());
3939   }
3940 }
3941 
3942 
DoBoundsCheck(LBoundsCheck * instr)3943 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3944   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
3945   Operand operand((int64_t)0);
3946   Register reg;
3947   if (instr->index()->IsConstantOperand()) {
3948     operand = ToOperand(instr->index());
3949     reg = ToRegister(instr->length());
3950     cc = CommuteCondition(cc);
3951   } else {
3952     reg = ToRegister(instr->index());
3953     operand = ToOperand(instr->length());
3954   }
3955   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
3956     Label done;
3957     __ Branch(&done, NegateCondition(cc), reg, operand);
3958     __ stop("eliminated bounds check failed");
3959     __ bind(&done);
3960   } else {
3961     DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
3962   }
3963 }
3964 
3965 
DoStoreKeyedExternalArray(LStoreKeyed * instr)3966 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
3967   Register external_pointer = ToRegister(instr->elements());
3968   Register key = no_reg;
3969   ElementsKind elements_kind = instr->elements_kind();
3970   bool key_is_constant = instr->key()->IsConstantOperand();
3971   int constant_key = 0;
3972   if (key_is_constant) {
3973     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3974     if (constant_key & 0xF0000000) {
3975       Abort(kArrayIndexConstantValueTooBig);
3976     }
3977   } else {
3978     key = ToRegister(instr->key());
3979   }
3980   int element_size_shift = ElementsKindToShiftSize(elements_kind);
3981   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3982       ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
3983       : element_size_shift;
3984   int base_offset = instr->base_offset();
3985 
3986   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
3987     Register address = scratch0();
3988     FPURegister value(ToDoubleRegister(instr->value()));
3989     if (key_is_constant) {
3990       if (constant_key != 0) {
3991         __ Daddu(address, external_pointer,
3992                 Operand(constant_key << element_size_shift));
3993       } else {
3994         address = external_pointer;
3995       }
3996     } else {
3997       if (shift_size < 0) {
3998         if (shift_size == -32) {
3999           __ dsra32(address, key, 0);
4000         } else {
4001           __ dsra(address, key, -shift_size);
4002         }
4003       } else {
4004         __ dsll(address, key, shift_size);
4005       }
4006       __ Daddu(address, external_pointer, address);
4007     }
4008 
4009     if (elements_kind == FLOAT32_ELEMENTS) {
4010       __ cvt_s_d(double_scratch0(), value);
4011       __ swc1(double_scratch0(), MemOperand(address, base_offset));
4012     } else {  // Storing doubles, not floats.
4013       __ sdc1(value, MemOperand(address, base_offset));
4014     }
4015   } else {
4016     Register value(ToRegister(instr->value()));
4017     MemOperand mem_operand = PrepareKeyedOperand(
4018         key, external_pointer, key_is_constant, constant_key,
4019         element_size_shift, shift_size,
4020         base_offset);
4021     switch (elements_kind) {
4022       case UINT8_ELEMENTS:
4023       case UINT8_CLAMPED_ELEMENTS:
4024       case INT8_ELEMENTS:
4025         __ sb(value, mem_operand);
4026         break;
4027       case INT16_ELEMENTS:
4028       case UINT16_ELEMENTS:
4029         __ sh(value, mem_operand);
4030         break;
4031       case INT32_ELEMENTS:
4032       case UINT32_ELEMENTS:
4033         __ sw(value, mem_operand);
4034         break;
4035       case FLOAT32_ELEMENTS:
4036       case FLOAT64_ELEMENTS:
4037       case FAST_DOUBLE_ELEMENTS:
4038       case FAST_ELEMENTS:
4039       case FAST_SMI_ELEMENTS:
4040       case FAST_HOLEY_DOUBLE_ELEMENTS:
4041       case FAST_HOLEY_ELEMENTS:
4042       case FAST_HOLEY_SMI_ELEMENTS:
4043       case DICTIONARY_ELEMENTS:
4044       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4045       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4046       case FAST_STRING_WRAPPER_ELEMENTS:
4047       case SLOW_STRING_WRAPPER_ELEMENTS:
4048       case NO_ELEMENTS:
4049         UNREACHABLE();
4050         break;
4051     }
4052   }
4053 }
4054 
4055 
DoStoreKeyedFixedDoubleArray(LStoreKeyed * instr)4056 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4057   DoubleRegister value = ToDoubleRegister(instr->value());
4058   Register elements = ToRegister(instr->elements());
4059   Register scratch = scratch0();
4060   DoubleRegister double_scratch = double_scratch0();
4061   bool key_is_constant = instr->key()->IsConstantOperand();
4062   int base_offset = instr->base_offset();
4063   Label not_nan, done;
4064 
4065   // Calculate the effective address of the slot in the array to store the
4066   // double value.
4067   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4068   if (key_is_constant) {
4069     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4070     if (constant_key & 0xF0000000) {
4071       Abort(kArrayIndexConstantValueTooBig);
4072     }
4073     __ Daddu(scratch, elements,
4074              Operand((constant_key << element_size_shift) + base_offset));
4075   } else {
4076     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4077         ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
4078         : element_size_shift;
4079     __ Daddu(scratch, elements, Operand(base_offset));
4080     DCHECK((shift_size == 3) || (shift_size == -29));
4081     if (shift_size == 3) {
4082       __ dsll(at, ToRegister(instr->key()), 3);
4083     } else if (shift_size == -29) {
4084       __ dsra(at, ToRegister(instr->key()), 29);
4085     }
4086     __ Daddu(scratch, scratch, at);
4087   }
4088 
4089   if (instr->NeedsCanonicalization()) {
4090     __ FPUCanonicalizeNaN(double_scratch, value);
4091     __ sdc1(double_scratch, MemOperand(scratch, 0));
4092   } else {
4093     __ sdc1(value, MemOperand(scratch, 0));
4094   }
4095 }
4096 
4097 
DoStoreKeyedFixedArray(LStoreKeyed * instr)4098 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4099   Register value = ToRegister(instr->value());
4100   Register elements = ToRegister(instr->elements());
4101   Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4102       : no_reg;
4103   Register scratch = scratch0();
4104   Register store_base = scratch;
4105   int offset = instr->base_offset();
4106 
4107   // Do the store.
4108   if (instr->key()->IsConstantOperand()) {
4109     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4110     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4111     offset += ToInteger32(const_operand) * kPointerSize;
4112     store_base = elements;
4113   } else {
4114     // Even though the HLoadKeyed instruction forces the input
4115     // representation for the key to be an integer, the input gets replaced
4116     // during bound check elimination with the index argument to the bounds
4117     // check, which can be tagged, so that case must be handled here, too.
4118     if (instr->hydrogen()->key()->representation().IsSmi()) {
4119       __ SmiScale(scratch, key, kPointerSizeLog2);
4120       __ daddu(store_base, elements, scratch);
4121     } else {
4122       __ Dlsa(store_base, elements, key, kPointerSizeLog2);
4123     }
4124   }
4125 
4126   Representation representation = instr->hydrogen()->value()->representation();
4127   if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4128     DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4129     DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
4130     if (FLAG_debug_code) {
4131       Register temp = scratch1();
4132       __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
4133       __ AssertSmi(temp);
4134     }
4135 
4136     // Store int value directly to upper half of the smi.
4137     STATIC_ASSERT(kSmiTag == 0);
4138     STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4139     offset = SmiWordOffset(offset);
4140     representation = Representation::Integer32();
4141   }
4142 
4143   __ Store(value, MemOperand(store_base, offset), representation);
4144 
4145   if (instr->hydrogen()->NeedsWriteBarrier()) {
4146     SmiCheck check_needed =
4147         instr->hydrogen()->value()->type().IsHeapObject()
4148             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4149     // Compute address of modified element and store it into key register.
4150     __ Daddu(key, store_base, Operand(offset));
4151     __ RecordWrite(elements,
4152                    key,
4153                    value,
4154                    GetRAState(),
4155                    kSaveFPRegs,
4156                    EMIT_REMEMBERED_SET,
4157                    check_needed,
4158                    instr->hydrogen()->PointersToHereCheckForValue());
4159   }
4160 }
4161 
4162 
DoStoreKeyed(LStoreKeyed * instr)4163 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4164   // By cases: external, fast double
4165   if (instr->is_fixed_typed_array()) {
4166     DoStoreKeyedExternalArray(instr);
4167   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4168     DoStoreKeyedFixedDoubleArray(instr);
4169   } else {
4170     DoStoreKeyedFixedArray(instr);
4171   }
4172 }
4173 
4174 
DoMaybeGrowElements(LMaybeGrowElements * instr)4175 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4176   class DeferredMaybeGrowElements final : public LDeferredCode {
4177    public:
4178     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4179         : LDeferredCode(codegen), instr_(instr) {}
4180     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4181     LInstruction* instr() override { return instr_; }
4182 
4183    private:
4184     LMaybeGrowElements* instr_;
4185   };
4186 
4187   Register result = v0;
4188   DeferredMaybeGrowElements* deferred =
4189       new (zone()) DeferredMaybeGrowElements(this, instr);
4190   LOperand* key = instr->key();
4191   LOperand* current_capacity = instr->current_capacity();
4192 
4193   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4194   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4195   DCHECK(key->IsConstantOperand() || key->IsRegister());
4196   DCHECK(current_capacity->IsConstantOperand() ||
4197          current_capacity->IsRegister());
4198 
4199   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4200     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4201     int32_t constant_capacity =
4202         ToInteger32(LConstantOperand::cast(current_capacity));
4203     if (constant_key >= constant_capacity) {
4204       // Deferred case.
4205       __ jmp(deferred->entry());
4206     }
4207   } else if (key->IsConstantOperand()) {
4208     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4209     __ Branch(deferred->entry(), le, ToRegister(current_capacity),
4210               Operand(constant_key));
4211   } else if (current_capacity->IsConstantOperand()) {
4212     int32_t constant_capacity =
4213         ToInteger32(LConstantOperand::cast(current_capacity));
4214     __ Branch(deferred->entry(), ge, ToRegister(key),
4215               Operand(constant_capacity));
4216   } else {
4217     __ Branch(deferred->entry(), ge, ToRegister(key),
4218               Operand(ToRegister(current_capacity)));
4219   }
4220 
4221   if (instr->elements()->IsRegister()) {
4222     __ mov(result, ToRegister(instr->elements()));
4223   } else {
4224     __ ld(result, ToMemOperand(instr->elements()));
4225   }
4226 
4227   __ bind(deferred->exit());
4228 }
4229 
4230 
DoDeferredMaybeGrowElements(LMaybeGrowElements * instr)4231 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4232   // TODO(3095996): Get rid of this. For now, we need to make the
4233   // result register contain a valid pointer because it is already
4234   // contained in the register pointer map.
4235   Register result = v0;
4236   __ mov(result, zero_reg);
4237 
4238   // We have to call a stub.
4239   {
4240     PushSafepointRegistersScope scope(this);
4241     if (instr->object()->IsRegister()) {
4242       __ mov(result, ToRegister(instr->object()));
4243     } else {
4244       __ ld(result, ToMemOperand(instr->object()));
4245     }
4246 
4247     LOperand* key = instr->key();
4248     if (key->IsConstantOperand()) {
4249       LConstantOperand* constant_key = LConstantOperand::cast(key);
4250       int32_t int_key = ToInteger32(constant_key);
4251       if (Smi::IsValid(int_key)) {
4252         __ li(a3, Operand(Smi::FromInt(int_key)));
4253       } else {
4254         // We should never get here at runtime because there is a smi check on
4255         // the key before this point.
4256         __ stop("expected smi");
4257       }
4258     } else {
4259       __ mov(a3, ToRegister(key));
4260       __ SmiTag(a3);
4261     }
4262 
4263     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
4264     __ mov(a0, result);
4265     __ CallStub(&stub);
4266     RecordSafepointWithLazyDeopt(
4267         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4268     __ StoreToSafepointRegisterSlot(result, result);
4269   }
4270 
4271   // Deopt on smi, which means the elements array changed to dictionary mode.
4272   __ SmiTst(result, at);
4273   DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
4274 }
4275 
4276 
DoTransitionElementsKind(LTransitionElementsKind * instr)4277 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4278   Register object_reg = ToRegister(instr->object());
4279   Register scratch = scratch0();
4280 
4281   Handle<Map> from_map = instr->original_map();
4282   Handle<Map> to_map = instr->transitioned_map();
4283   ElementsKind from_kind = instr->from_kind();
4284   ElementsKind to_kind = instr->to_kind();
4285 
4286   Label not_applicable;
4287   __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4288   __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4289 
4290   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4291     Register new_map_reg = ToRegister(instr->new_map_temp());
4292     __ li(new_map_reg, Operand(to_map));
4293     __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4294     // Write barrier.
4295     __ RecordWriteForMap(object_reg,
4296                          new_map_reg,
4297                          scratch,
4298                          GetRAState(),
4299                          kDontSaveFPRegs);
4300   } else {
4301     DCHECK(object_reg.is(a0));
4302     DCHECK(ToRegister(instr->context()).is(cp));
4303     PushSafepointRegistersScope scope(this);
4304     __ li(a1, Operand(to_map));
4305     TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
4306     __ CallStub(&stub);
4307     RecordSafepointWithRegisters(
4308         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4309   }
4310   __ bind(&not_applicable);
4311 }
4312 
4313 
DoTrapAllocationMemento(LTrapAllocationMemento * instr)4314 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4315   Register object = ToRegister(instr->object());
4316   Register temp = ToRegister(instr->temp());
4317   Label no_memento_found;
4318   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4319   DeoptimizeIf(al, instr, DeoptimizeReason::kMementoFound);
4320   __ bind(&no_memento_found);
4321 }
4322 
4323 
DoStringAdd(LStringAdd * instr)4324 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4325   DCHECK(ToRegister(instr->context()).is(cp));
4326   DCHECK(ToRegister(instr->left()).is(a1));
4327   DCHECK(ToRegister(instr->right()).is(a0));
4328   StringAddStub stub(isolate(),
4329                      instr->hydrogen()->flags(),
4330                      instr->hydrogen()->pretenure_flag());
4331   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4332 }
4333 
4334 
DoStringCharCodeAt(LStringCharCodeAt * instr)4335 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4336   class DeferredStringCharCodeAt final : public LDeferredCode {
4337    public:
4338     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4339         : LDeferredCode(codegen), instr_(instr) { }
4340     void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4341     LInstruction* instr() override { return instr_; }
4342 
4343    private:
4344     LStringCharCodeAt* instr_;
4345   };
4346 
4347   DeferredStringCharCodeAt* deferred =
4348       new(zone()) DeferredStringCharCodeAt(this, instr);
4349   StringCharLoadGenerator::Generate(masm(),
4350                                     ToRegister(instr->string()),
4351                                     ToRegister(instr->index()),
4352                                     ToRegister(instr->result()),
4353                                     deferred->entry());
4354   __ bind(deferred->exit());
4355 }
4356 
4357 
DoDeferredStringCharCodeAt(LStringCharCodeAt * instr)4358 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4359   Register string = ToRegister(instr->string());
4360   Register result = ToRegister(instr->result());
4361   Register scratch = scratch0();
4362 
4363   // TODO(3095996): Get rid of this. For now, we need to make the
4364   // result register contain a valid pointer because it is already
4365   // contained in the register pointer map.
4366   __ mov(result, zero_reg);
4367 
4368   PushSafepointRegistersScope scope(this);
4369   __ push(string);
4370   // Push the index as a smi. This is safe because of the checks in
4371   // DoStringCharCodeAt above.
4372   if (instr->index()->IsConstantOperand()) {
4373     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4374     __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4375     __ push(scratch);
4376   } else {
4377     Register index = ToRegister(instr->index());
4378     __ SmiTag(index);
4379     __ push(index);
4380   }
4381   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4382                           instr->context());
4383   __ AssertSmi(v0);
4384   __ SmiUntag(v0);
4385   __ StoreToSafepointRegisterSlot(v0, result);
4386 }
4387 
4388 
DoStringCharFromCode(LStringCharFromCode * instr)4389 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4390   class DeferredStringCharFromCode final : public LDeferredCode {
4391    public:
4392     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4393         : LDeferredCode(codegen), instr_(instr) { }
4394     void Generate() override {
4395       codegen()->DoDeferredStringCharFromCode(instr_);
4396     }
4397     LInstruction* instr() override { return instr_; }
4398 
4399    private:
4400     LStringCharFromCode* instr_;
4401   };
4402 
4403   DeferredStringCharFromCode* deferred =
4404       new(zone()) DeferredStringCharFromCode(this, instr);
4405 
4406   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4407   Register char_code = ToRegister(instr->char_code());
4408   Register result = ToRegister(instr->result());
4409   Register scratch = scratch0();
4410   DCHECK(!char_code.is(result));
4411 
4412   __ Branch(deferred->entry(), hi,
4413             char_code, Operand(String::kMaxOneByteCharCode));
4414   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4415   __ Dlsa(result, result, char_code, kPointerSizeLog2);
4416   __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4417   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4418   __ Branch(deferred->entry(), eq, result, Operand(scratch));
4419   __ bind(deferred->exit());
4420 }
4421 
4422 
DoDeferredStringCharFromCode(LStringCharFromCode * instr)4423 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4424   Register char_code = ToRegister(instr->char_code());
4425   Register result = ToRegister(instr->result());
4426 
4427   // TODO(3095996): Get rid of this. For now, we need to make the
4428   // result register contain a valid pointer because it is already
4429   // contained in the register pointer map.
4430   __ mov(result, zero_reg);
4431 
4432   PushSafepointRegistersScope scope(this);
4433   __ SmiTag(char_code);
4434   __ push(char_code);
4435   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4436                           instr->context());
4437   __ StoreToSafepointRegisterSlot(v0, result);
4438 }
4439 
4440 
DoInteger32ToDouble(LInteger32ToDouble * instr)4441 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4442   LOperand* input = instr->value();
4443   DCHECK(input->IsRegister() || input->IsStackSlot());
4444   LOperand* output = instr->result();
4445   DCHECK(output->IsDoubleRegister());
4446   FPURegister single_scratch = double_scratch0().low();
4447   if (input->IsStackSlot()) {
4448     Register scratch = scratch0();
4449     __ ld(scratch, ToMemOperand(input));
4450     __ mtc1(scratch, single_scratch);
4451   } else {
4452     __ mtc1(ToRegister(input), single_scratch);
4453   }
4454   __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4455 }
4456 
4457 
DoUint32ToDouble(LUint32ToDouble * instr)4458 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4459   LOperand* input = instr->value();
4460   LOperand* output = instr->result();
4461 
4462   FPURegister dbl_scratch = double_scratch0();
4463   __ mtc1(ToRegister(input), dbl_scratch);
4464   __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
4465 }
4466 
4467 
DoNumberTagU(LNumberTagU * instr)4468 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4469   class DeferredNumberTagU final : public LDeferredCode {
4470    public:
4471     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4472         : LDeferredCode(codegen), instr_(instr) { }
4473     void Generate() override {
4474       codegen()->DoDeferredNumberTagIU(instr_,
4475                                        instr_->value(),
4476                                        instr_->temp1(),
4477                                        instr_->temp2(),
4478                                        UNSIGNED_INT32);
4479     }
4480     LInstruction* instr() override { return instr_; }
4481 
4482    private:
4483     LNumberTagU* instr_;
4484   };
4485 
4486   Register input = ToRegister(instr->value());
4487   Register result = ToRegister(instr->result());
4488 
4489   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4490   __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4491   __ SmiTag(result, input);
4492   __ bind(deferred->exit());
4493 }
4494 
4495 
DoDeferredNumberTagIU(LInstruction * instr,LOperand * value,LOperand * temp1,LOperand * temp2,IntegerSignedness signedness)4496 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4497                                      LOperand* value,
4498                                      LOperand* temp1,
4499                                      LOperand* temp2,
4500                                      IntegerSignedness signedness) {
4501   Label done, slow;
4502   Register src = ToRegister(value);
4503   Register dst = ToRegister(instr->result());
4504   Register tmp1 = scratch0();
4505   Register tmp2 = ToRegister(temp1);
4506   Register tmp3 = ToRegister(temp2);
4507   DoubleRegister dbl_scratch = double_scratch0();
4508 
4509   if (signedness == SIGNED_INT32) {
4510     // There was overflow, so bits 30 and 31 of the original integer
4511     // disagree. Try to allocate a heap number in new space and store
4512     // the value in there. If that fails, call the runtime system.
4513     if (dst.is(src)) {
4514       __ SmiUntag(src, dst);
4515       __ Xor(src, src, Operand(0x80000000));
4516     }
4517     __ mtc1(src, dbl_scratch);
4518     __ cvt_d_w(dbl_scratch, dbl_scratch);
4519   } else {
4520     __ mtc1(src, dbl_scratch);
4521     __ Cvt_d_uw(dbl_scratch, dbl_scratch);
4522   }
4523 
4524   if (FLAG_inline_new) {
4525     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4526     __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4527     __ Branch(&done);
4528   }
4529 
4530   // Slow case: Call the runtime system to do the number allocation.
4531   __ bind(&slow);
4532   {
4533     // TODO(3095996): Put a valid pointer value in the stack slot where the
4534     // result register is stored, as this register is in the pointer map, but
4535     // contains an integer value.
4536     __ mov(dst, zero_reg);
4537     // Preserve the value of all registers.
4538     PushSafepointRegistersScope scope(this);
4539     // Reset the context register.
4540     if (!dst.is(cp)) {
4541       __ mov(cp, zero_reg);
4542     }
4543     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4544     RecordSafepointWithRegisters(
4545         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4546     __ StoreToSafepointRegisterSlot(v0, dst);
4547   }
4548 
4549   // Done. Put the value in dbl_scratch into the value of the allocated heap
4550   // number.
4551   __ bind(&done);
4552   __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4553 }
4554 
4555 
DoNumberTagD(LNumberTagD * instr)4556 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4557   class DeferredNumberTagD final : public LDeferredCode {
4558    public:
4559     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4560         : LDeferredCode(codegen), instr_(instr) { }
4561     void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4562     LInstruction* instr() override { return instr_; }
4563 
4564    private:
4565     LNumberTagD* instr_;
4566   };
4567 
4568   DoubleRegister input_reg = ToDoubleRegister(instr->value());
4569   Register scratch = scratch0();
4570   Register reg = ToRegister(instr->result());
4571   Register temp1 = ToRegister(instr->temp());
4572   Register temp2 = ToRegister(instr->temp2());
4573 
4574   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4575   if (FLAG_inline_new) {
4576     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4577     // We want the untagged address first for performance
4578     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4579   } else {
4580     __ Branch(deferred->entry());
4581   }
4582   __ bind(deferred->exit());
4583   __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4584 }
4585 
4586 
DoDeferredNumberTagD(LNumberTagD * instr)4587 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4588   // TODO(3095996): Get rid of this. For now, we need to make the
4589   // result register contain a valid pointer because it is already
4590   // contained in the register pointer map.
4591   Register reg = ToRegister(instr->result());
4592   __ mov(reg, zero_reg);
4593 
4594   PushSafepointRegistersScope scope(this);
4595   // Reset the context register.
4596   if (!reg.is(cp)) {
4597     __ mov(cp, zero_reg);
4598   }
4599   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4600   RecordSafepointWithRegisters(
4601       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4602   __ StoreToSafepointRegisterSlot(v0, reg);
4603 }
4604 
4605 
DoSmiTag(LSmiTag * instr)4606 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4607   HChange* hchange = instr->hydrogen();
4608   Register input = ToRegister(instr->value());
4609   Register output = ToRegister(instr->result());
4610   if (hchange->CheckFlag(HValue::kCanOverflow) &&
4611       hchange->value()->CheckFlag(HValue::kUint32)) {
4612     __ And(at, input, Operand(0x80000000));
4613     DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
4614   }
4615   if (hchange->CheckFlag(HValue::kCanOverflow) &&
4616       !hchange->value()->CheckFlag(HValue::kUint32)) {
4617     __ SmiTagCheckOverflow(output, input, at);
4618     DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
4619   } else {
4620     __ SmiTag(output, input);
4621   }
4622 }
4623 
4624 
DoSmiUntag(LSmiUntag * instr)4625 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4626   Register scratch = scratch0();
4627   Register input = ToRegister(instr->value());
4628   Register result = ToRegister(instr->result());
4629   if (instr->needs_check()) {
4630     STATIC_ASSERT(kHeapObjectTag == 1);
4631     // If the input is a HeapObject, value of scratch won't be zero.
4632     __ And(scratch, input, Operand(kHeapObjectTag));
4633     __ SmiUntag(result, input);
4634     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
4635                  Operand(zero_reg));
4636   } else {
4637     __ SmiUntag(result, input);
4638   }
4639 }
4640 
4641 
EmitNumberUntagD(LNumberUntagD * instr,Register input_reg,DoubleRegister result_reg,NumberUntagDMode mode)4642 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4643                                 DoubleRegister result_reg,
4644                                 NumberUntagDMode mode) {
4645   bool can_convert_undefined_to_nan = instr->truncating();
4646   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4647 
4648   Register scratch = scratch0();
4649   Label convert, load_smi, done;
4650   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4651     // Smi check.
4652     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4653     // Heap number map check.
4654     __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4655     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4656     if (can_convert_undefined_to_nan) {
4657       __ Branch(&convert, ne, scratch, Operand(at));
4658     } else {
4659       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
4660                    Operand(at));
4661     }
4662     // Load heap number.
4663     __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4664     if (deoptimize_on_minus_zero) {
4665       __ mfc1(at, result_reg);
4666       __ Branch(&done, ne, at, Operand(zero_reg));
4667       __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
4668       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
4669                    Operand(HeapNumber::kSignMask));
4670     }
4671     __ Branch(&done);
4672     if (can_convert_undefined_to_nan) {
4673       __ bind(&convert);
4674       // Convert undefined (and hole) to NaN.
4675       __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4676       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
4677                    input_reg, Operand(at));
4678       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4679       __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4680       __ Branch(&done);
4681     }
4682   } else {
4683     __ SmiUntag(scratch, input_reg);
4684     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4685   }
4686   // Smi to double register conversion
4687   __ bind(&load_smi);
4688   // scratch: untagged value of input_reg
4689   __ mtc1(scratch, result_reg);
4690   __ cvt_d_w(result_reg, result_reg);
4691   __ bind(&done);
4692 }
4693 
4694 
DoDeferredTaggedToI(LTaggedToI * instr)4695 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4696   Register input_reg = ToRegister(instr->value());
4697   Register scratch1 = scratch0();
4698   Register scratch2 = ToRegister(instr->temp());
4699   DoubleRegister double_scratch = double_scratch0();
4700   DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4701 
4702   DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4703   DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4704 
4705   Label done;
4706 
4707   // The input is a tagged HeapObject.
4708   // Heap number map check.
4709   __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4710   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4711   // This 'at' value and scratch1 map value are used for tests in both clauses
4712   // of the if.
4713 
4714   if (instr->truncating()) {
4715     Label truncate;
4716     __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
4717     __ mov(scratch2, input_reg);  // In delay slot.
4718     __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4719     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
4720                  Operand(ODDBALL_TYPE));
4721     __ bind(&truncate);
4722     __ TruncateHeapNumberToI(input_reg, scratch2);
4723   } else {
4724     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
4725                  Operand(at));
4726 
4727     // Load the double value.
4728     __ ldc1(double_scratch,
4729             FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4730 
4731     Register except_flag = scratch2;
4732     __ EmitFPUTruncate(kRoundToZero,
4733                        input_reg,
4734                        double_scratch,
4735                        scratch1,
4736                        double_scratch2,
4737                        except_flag,
4738                        kCheckForInexactConversion);
4739 
4740     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
4741                  Operand(zero_reg));
4742 
4743     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4744       __ Branch(&done, ne, input_reg, Operand(zero_reg));
4745 
4746       __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
4747       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4748       DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
4749                    Operand(zero_reg));
4750     }
4751   }
4752   __ bind(&done);
4753 }
4754 
4755 
DoTaggedToI(LTaggedToI * instr)4756 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4757   class DeferredTaggedToI final : public LDeferredCode {
4758    public:
4759     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4760         : LDeferredCode(codegen), instr_(instr) { }
4761     void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4762     LInstruction* instr() override { return instr_; }
4763 
4764    private:
4765     LTaggedToI* instr_;
4766   };
4767 
4768   LOperand* input = instr->value();
4769   DCHECK(input->IsRegister());
4770   DCHECK(input->Equals(instr->result()));
4771 
4772   Register input_reg = ToRegister(input);
4773 
4774   if (instr->hydrogen()->value()->representation().IsSmi()) {
4775     __ SmiUntag(input_reg);
4776   } else {
4777     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4778 
4779     // Let the deferred code handle the HeapObject case.
4780     __ JumpIfNotSmi(input_reg, deferred->entry());
4781 
4782     // Smi to int32 conversion.
4783     __ SmiUntag(input_reg);
4784     __ bind(deferred->exit());
4785   }
4786 }
4787 
4788 
DoNumberUntagD(LNumberUntagD * instr)4789 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4790   LOperand* input = instr->value();
4791   DCHECK(input->IsRegister());
4792   LOperand* result = instr->result();
4793   DCHECK(result->IsDoubleRegister());
4794 
4795   Register input_reg = ToRegister(input);
4796   DoubleRegister result_reg = ToDoubleRegister(result);
4797 
4798   HValue* value = instr->hydrogen()->value();
4799   NumberUntagDMode mode = value->representation().IsSmi()
4800       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4801 
4802   EmitNumberUntagD(instr, input_reg, result_reg, mode);
4803 }
4804 
4805 
DoDoubleToI(LDoubleToI * instr)4806 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4807   Register result_reg = ToRegister(instr->result());
4808   Register scratch1 = scratch0();
4809   DoubleRegister double_input = ToDoubleRegister(instr->value());
4810 
4811   if (instr->truncating()) {
4812     __ TruncateDoubleToI(result_reg, double_input);
4813   } else {
4814     Register except_flag = LCodeGen::scratch1();
4815 
4816     __ EmitFPUTruncate(kRoundToMinusInf,
4817                        result_reg,
4818                        double_input,
4819                        scratch1,
4820                        double_scratch0(),
4821                        except_flag,
4822                        kCheckForInexactConversion);
4823 
4824     // Deopt if the operation did not succeed (except_flag != 0).
4825     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
4826                  Operand(zero_reg));
4827 
4828     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4829       Label done;
4830       __ Branch(&done, ne, result_reg, Operand(zero_reg));
4831       __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
4832       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4833       DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
4834                    Operand(zero_reg));
4835       __ bind(&done);
4836     }
4837   }
4838 }
4839 
4840 
DoDoubleToSmi(LDoubleToSmi * instr)4841 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4842   Register result_reg = ToRegister(instr->result());
4843   Register scratch1 = LCodeGen::scratch0();
4844   DoubleRegister double_input = ToDoubleRegister(instr->value());
4845 
4846   if (instr->truncating()) {
4847     __ TruncateDoubleToI(result_reg, double_input);
4848   } else {
4849     Register except_flag = LCodeGen::scratch1();
4850 
4851     __ EmitFPUTruncate(kRoundToMinusInf,
4852                        result_reg,
4853                        double_input,
4854                        scratch1,
4855                        double_scratch0(),
4856                        except_flag,
4857                        kCheckForInexactConversion);
4858 
4859     // Deopt if the operation did not succeed (except_flag != 0).
4860     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
4861                  Operand(zero_reg));
4862 
4863     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4864       Label done;
4865       __ Branch(&done, ne, result_reg, Operand(zero_reg));
4866       __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
4867       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4868       DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
4869                    Operand(zero_reg));
4870       __ bind(&done);
4871     }
4872   }
4873   __ SmiTag(result_reg, result_reg);
4874 }
4875 
4876 
DoCheckSmi(LCheckSmi * instr)4877 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4878   LOperand* input = instr->value();
4879   __ SmiTst(ToRegister(input), at);
4880   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
4881 }
4882 
4883 
DoCheckNonSmi(LCheckNonSmi * instr)4884 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4885   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4886     LOperand* input = instr->value();
4887     __ SmiTst(ToRegister(input), at);
4888     DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
4889   }
4890 }
4891 
4892 
DoCheckArrayBufferNotNeutered(LCheckArrayBufferNotNeutered * instr)4893 void LCodeGen::DoCheckArrayBufferNotNeutered(
4894     LCheckArrayBufferNotNeutered* instr) {
4895   Register view = ToRegister(instr->view());
4896   Register scratch = scratch0();
4897 
4898   __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
4899   __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
4900   __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
4901   DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
4902                Operand(zero_reg));
4903 }
4904 
4905 
DoCheckInstanceType(LCheckInstanceType * instr)4906 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4907   Register input = ToRegister(instr->value());
4908   Register scratch = scratch0();
4909 
4910   __ GetObjectType(input, scratch, scratch);
4911 
4912   if (instr->hydrogen()->is_interval_check()) {
4913     InstanceType first;
4914     InstanceType last;
4915     instr->hydrogen()->GetCheckInterval(&first, &last);
4916 
4917     // If there is only one type in the interval check for equality.
4918     if (first == last) {
4919       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
4920                    Operand(first));
4921     } else {
4922       DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
4923                    Operand(first));
4924       // Omit check for the last type.
4925       if (last != LAST_TYPE) {
4926         DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
4927                      Operand(last));
4928       }
4929     }
4930   } else {
4931     uint8_t mask;
4932     uint8_t tag;
4933     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4934 
4935     if (base::bits::IsPowerOfTwo32(mask)) {
4936       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
4937       __ And(at, scratch, mask);
4938       DeoptimizeIf(tag == 0 ? ne : eq, instr,
4939                    DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
4940     } else {
4941       __ And(scratch, scratch, Operand(mask));
4942       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
4943                    Operand(tag));
4944     }
4945   }
4946 }
4947 
4948 
DoCheckValue(LCheckValue * instr)4949 void LCodeGen::DoCheckValue(LCheckValue* instr) {
4950   Register reg = ToRegister(instr->value());
4951   Handle<HeapObject> object = instr->hydrogen()->object().handle();
4952   AllowDeferredHandleDereference smi_check;
4953   if (isolate()->heap()->InNewSpace(*object)) {
4954     Register reg = ToRegister(instr->value());
4955     Handle<Cell> cell = isolate()->factory()->NewCell(object);
4956     __ li(at, Operand(cell));
4957     __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
4958     DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
4959   } else {
4960     DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
4961                  Operand(object));
4962   }
4963 }
4964 
4965 
DoDeferredInstanceMigration(LCheckMaps * instr,Register object)4966 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4967   {
4968     PushSafepointRegistersScope scope(this);
4969     __ push(object);
4970     __ mov(cp, zero_reg);
4971     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4972     RecordSafepointWithRegisters(
4973         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4974     __ StoreToSafepointRegisterSlot(v0, scratch0());
4975   }
4976   __ SmiTst(scratch0(), at);
4977   DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
4978                Operand(zero_reg));
4979 }
4980 
4981 
DoCheckMaps(LCheckMaps * instr)4982 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4983   class DeferredCheckMaps final : public LDeferredCode {
4984    public:
4985     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4986         : LDeferredCode(codegen), instr_(instr), object_(object) {
4987       SetExit(check_maps());
4988     }
4989     void Generate() override {
4990       codegen()->DoDeferredInstanceMigration(instr_, object_);
4991     }
4992     Label* check_maps() { return &check_maps_; }
4993     LInstruction* instr() override { return instr_; }
4994 
4995    private:
4996     LCheckMaps* instr_;
4997     Label check_maps_;
4998     Register object_;
4999   };
5000 
5001   if (instr->hydrogen()->IsStabilityCheck()) {
5002     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5003     for (int i = 0; i < maps->size(); ++i) {
5004       AddStabilityDependency(maps->at(i).handle());
5005     }
5006     return;
5007   }
5008 
5009   Register map_reg = scratch0();
5010   LOperand* input = instr->value();
5011   DCHECK(input->IsRegister());
5012   Register reg = ToRegister(input);
5013   __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5014 
5015   DeferredCheckMaps* deferred = NULL;
5016   if (instr->hydrogen()->HasMigrationTarget()) {
5017     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5018     __ bind(deferred->check_maps());
5019   }
5020 
5021   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5022   Label success;
5023   for (int i = 0; i < maps->size() - 1; i++) {
5024     Handle<Map> map = maps->at(i).handle();
5025     __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
5026   }
5027   Handle<Map> map = maps->at(maps->size() - 1).handle();
5028   // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
5029   if (instr->hydrogen()->HasMigrationTarget()) {
5030     __ Branch(deferred->entry(), ne, map_reg, Operand(map));
5031   } else {
5032     DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
5033   }
5034 
5035   __ bind(&success);
5036 }
5037 
5038 
DoClampDToUint8(LClampDToUint8 * instr)5039 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5040   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5041   Register result_reg = ToRegister(instr->result());
5042   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5043   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5044 }
5045 
5046 
DoClampIToUint8(LClampIToUint8 * instr)5047 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5048   Register unclamped_reg = ToRegister(instr->unclamped());
5049   Register result_reg = ToRegister(instr->result());
5050   __ ClampUint8(result_reg, unclamped_reg);
5051 }
5052 
5053 
DoClampTToUint8(LClampTToUint8 * instr)5054 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5055   Register scratch = scratch0();
5056   Register input_reg = ToRegister(instr->unclamped());
5057   Register result_reg = ToRegister(instr->result());
5058   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5059   Label is_smi, done, heap_number;
5060 
5061   // Both smi and heap number cases are handled.
5062   __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5063 
5064   // Check for heap number
5065   __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5066   __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5067 
5068   // Check for undefined. Undefined is converted to zero for clamping
5069   // conversions.
5070   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
5071                Operand(factory()->undefined_value()));
5072   __ mov(result_reg, zero_reg);
5073   __ jmp(&done);
5074 
5075   // Heap number
5076   __ bind(&heap_number);
5077   __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
5078                                              HeapNumber::kValueOffset));
5079   __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
5080   __ jmp(&done);
5081 
5082   __ bind(&is_smi);
5083   __ ClampUint8(result_reg, scratch);
5084 
5085   __ bind(&done);
5086 }
5087 
5088 
DoAllocate(LAllocate * instr)5089 void LCodeGen::DoAllocate(LAllocate* instr) {
5090   class DeferredAllocate final : public LDeferredCode {
5091    public:
5092     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5093         : LDeferredCode(codegen), instr_(instr) { }
5094     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5095     LInstruction* instr() override { return instr_; }
5096 
5097    private:
5098     LAllocate* instr_;
5099   };
5100 
5101   DeferredAllocate* deferred =
5102       new(zone()) DeferredAllocate(this, instr);
5103 
5104   Register result = ToRegister(instr->result());
5105   Register scratch = ToRegister(instr->temp1());
5106   Register scratch2 = ToRegister(instr->temp2());
5107 
5108   // Allocate memory for the object.
5109   AllocationFlags flags = NO_ALLOCATION_FLAGS;
5110   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5111     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5112   }
5113   if (instr->hydrogen()->IsOldSpaceAllocation()) {
5114     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5115     flags = static_cast<AllocationFlags>(flags | PRETENURE);
5116   }
5117 
5118   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5119     flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
5120   }
5121   DCHECK(!instr->hydrogen()->IsAllocationFolded());
5122 
5123   if (instr->size()->IsConstantOperand()) {
5124     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5125     CHECK(size <= kMaxRegularHeapObjectSize);
5126     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5127   } else {
5128     Register size = ToRegister(instr->size());
5129     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5130   }
5131 
5132   __ bind(deferred->exit());
5133 
5134   if (instr->hydrogen()->MustPrefillWithFiller()) {
5135     STATIC_ASSERT(kHeapObjectTag == 1);
5136     if (instr->size()->IsConstantOperand()) {
5137       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5138       __ li(scratch, Operand(size - kHeapObjectTag));
5139     } else {
5140       __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5141     }
5142     __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5143     Label loop;
5144     __ bind(&loop);
5145     __ Dsubu(scratch, scratch, Operand(kPointerSize));
5146     __ Daddu(at, result, Operand(scratch));
5147     __ sd(scratch2, MemOperand(at));
5148     __ Branch(&loop, ge, scratch, Operand(zero_reg));
5149   }
5150 }
5151 
5152 
DoDeferredAllocate(LAllocate * instr)5153 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5154   Register result = ToRegister(instr->result());
5155 
5156   // TODO(3095996): Get rid of this. For now, we need to make the
5157   // result register contain a valid pointer because it is already
5158   // contained in the register pointer map.
5159   __ mov(result, zero_reg);
5160 
5161   PushSafepointRegistersScope scope(this);
5162   if (instr->size()->IsRegister()) {
5163     Register size = ToRegister(instr->size());
5164     DCHECK(!size.is(result));
5165     __ SmiTag(size);
5166     __ push(size);
5167   } else {
5168     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5169     if (size >= 0 && size <= Smi::kMaxValue) {
5170       __ li(v0, Operand(Smi::FromInt(size)));
5171       __ Push(v0);
5172     } else {
5173       // We should never get here at runtime => abort
5174       __ stop("invalid allocation size");
5175       return;
5176     }
5177   }
5178 
5179   int flags = AllocateDoubleAlignFlag::encode(
5180       instr->hydrogen()->MustAllocateDoubleAligned());
5181   if (instr->hydrogen()->IsOldSpaceAllocation()) {
5182     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5183     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5184   } else {
5185     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5186   }
5187   __ li(v0, Operand(Smi::FromInt(flags)));
5188   __ Push(v0);
5189 
5190   CallRuntimeFromDeferred(
5191       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5192   __ StoreToSafepointRegisterSlot(v0, result);
5193 
5194   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5195     AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
5196     if (instr->hydrogen()->IsOldSpaceAllocation()) {
5197       DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5198       allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
5199     }
5200     // If the allocation folding dominator allocate triggered a GC, allocation
5201     // happend in the runtime. We have to reset the top pointer to virtually
5202     // undo the allocation.
5203     ExternalReference allocation_top =
5204         AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
5205     Register top_address = scratch0();
5206     __ Dsubu(v0, v0, Operand(kHeapObjectTag));
5207     __ li(top_address, Operand(allocation_top));
5208     __ sd(v0, MemOperand(top_address));
5209     __ Daddu(v0, v0, Operand(kHeapObjectTag));
5210   }
5211 }
5212 
DoFastAllocate(LFastAllocate * instr)5213 void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
5214   DCHECK(instr->hydrogen()->IsAllocationFolded());
5215   DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
5216   Register result = ToRegister(instr->result());
5217   Register scratch1 = ToRegister(instr->temp1());
5218   Register scratch2 = ToRegister(instr->temp2());
5219 
5220   AllocationFlags flags = ALLOCATION_FOLDED;
5221   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5222     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5223   }
5224   if (instr->hydrogen()->IsOldSpaceAllocation()) {
5225     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5226     flags = static_cast<AllocationFlags>(flags | PRETENURE);
5227   }
5228   if (instr->size()->IsConstantOperand()) {
5229     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5230     CHECK(size <= kMaxRegularHeapObjectSize);
5231     __ FastAllocate(size, result, scratch1, scratch2, flags);
5232   } else {
5233     Register size = ToRegister(instr->size());
5234     __ FastAllocate(size, result, scratch1, scratch2, flags);
5235   }
5236 }
5237 
5238 
DoTypeof(LTypeof * instr)5239 void LCodeGen::DoTypeof(LTypeof* instr) {
5240   DCHECK(ToRegister(instr->value()).is(a3));
5241   DCHECK(ToRegister(instr->result()).is(v0));
5242   Label end, do_call;
5243   Register value_register = ToRegister(instr->value());
5244   __ JumpIfNotSmi(value_register, &do_call);
5245   __ li(v0, Operand(isolate()->factory()->number_string()));
5246   __ jmp(&end);
5247   __ bind(&do_call);
5248   Callable callable = CodeFactory::Typeof(isolate());
5249   CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
5250   __ bind(&end);
5251 }
5252 
5253 
DoTypeofIsAndBranch(LTypeofIsAndBranch * instr)5254 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5255   Register input = ToRegister(instr->value());
5256 
5257   Register cmp1 = no_reg;
5258   Operand cmp2 = Operand(no_reg);
5259 
5260   Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5261                                                   instr->FalseLabel(chunk_),
5262                                                   input,
5263                                                   instr->type_literal(),
5264                                                   &cmp1,
5265                                                   &cmp2);
5266 
5267   DCHECK(cmp1.is_valid());
5268   DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
5269 
5270   if (final_branch_condition != kNoCondition) {
5271     EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5272   }
5273 }
5274 
5275 
EmitTypeofIs(Label * true_label,Label * false_label,Register input,Handle<String> type_name,Register * cmp1,Operand * cmp2)5276 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5277                                  Label* false_label,
5278                                  Register input,
5279                                  Handle<String> type_name,
5280                                  Register* cmp1,
5281                                  Operand* cmp2) {
5282   // This function utilizes the delay slot heavily. This is used to load
5283   // values that are always usable without depending on the type of the input
5284   // register.
5285   Condition final_branch_condition = kNoCondition;
5286   Register scratch = scratch0();
5287   Factory* factory = isolate()->factory();
5288   if (String::Equals(type_name, factory->number_string())) {
5289     __ JumpIfSmi(input, true_label);
5290     __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
5291     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5292     *cmp1 = input;
5293     *cmp2 = Operand(at);
5294     final_branch_condition = eq;
5295 
5296   } else if (String::Equals(type_name, factory->string_string())) {
5297     __ JumpIfSmi(input, false_label);
5298     __ GetObjectType(input, input, scratch);
5299     *cmp1 = scratch;
5300     *cmp2 = Operand(FIRST_NONSTRING_TYPE);
5301     final_branch_condition = lt;
5302 
5303   } else if (String::Equals(type_name, factory->symbol_string())) {
5304     __ JumpIfSmi(input, false_label);
5305     __ GetObjectType(input, input, scratch);
5306     *cmp1 = scratch;
5307     *cmp2 = Operand(SYMBOL_TYPE);
5308     final_branch_condition = eq;
5309 
5310   } else if (String::Equals(type_name, factory->boolean_string())) {
5311     __ LoadRoot(at, Heap::kTrueValueRootIndex);
5312     __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5313     __ LoadRoot(at, Heap::kFalseValueRootIndex);
5314     *cmp1 = at;
5315     *cmp2 = Operand(input);
5316     final_branch_condition = eq;
5317 
5318   } else if (String::Equals(type_name, factory->undefined_string())) {
5319     __ LoadRoot(at, Heap::kNullValueRootIndex);
5320     __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
5321     // The first instruction of JumpIfSmi is an And - it is safe in the delay
5322     // slot.
5323     __ JumpIfSmi(input, false_label);
5324     // Check for undetectable objects => true.
5325     __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
5326     __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5327     __ And(at, at, 1 << Map::kIsUndetectable);
5328     *cmp1 = at;
5329     *cmp2 = Operand(zero_reg);
5330     final_branch_condition = ne;
5331 
5332   } else if (String::Equals(type_name, factory->function_string())) {
5333     __ JumpIfSmi(input, false_label);
5334     __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5335     __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5336     __ And(scratch, scratch,
5337            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5338     *cmp1 = scratch;
5339     *cmp2 = Operand(1 << Map::kIsCallable);
5340     final_branch_condition = eq;
5341 
5342   } else if (String::Equals(type_name, factory->object_string())) {
5343     __ JumpIfSmi(input, false_label);
5344     __ LoadRoot(at, Heap::kNullValueRootIndex);
5345     __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5346     STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5347     __ GetObjectType(input, scratch, scratch1());
5348     __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
5349     // Check for callable or undetectable objects => false.
5350     __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5351     __ And(at, scratch,
5352            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5353     *cmp1 = at;
5354     *cmp2 = Operand(zero_reg);
5355     final_branch_condition = eq;
5356 
5357 // clang-format off
5358 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
5359   } else if (String::Equals(type_name, factory->type##_string())) {  \
5360     __ JumpIfSmi(input, false_label);                                \
5361     __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
5362     __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
5363     *cmp1 = input;                                                   \
5364     *cmp2 = Operand(at);                                             \
5365     final_branch_condition = eq;
5366   SIMD128_TYPES(SIMD128_TYPE)
5367 #undef SIMD128_TYPE
5368     // clang-format on
5369 
5370 
5371   } else {
5372     *cmp1 = at;
5373     *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
5374     __ Branch(false_label);
5375   }
5376 
5377   return final_branch_condition;
5378 }
5379 
5380 
EnsureSpaceForLazyDeopt(int space_needed)5381 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5382   if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5383     // Ensure that we have enough space after the previous lazy-bailout
5384     // instruction for patching the code here.
5385     int current_pc = masm()->pc_offset();
5386     if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5387       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5388       DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5389       while (padding_size > 0) {
5390         __ nop();
5391         padding_size -= Assembler::kInstrSize;
5392       }
5393     }
5394   }
5395   last_lazy_deopt_pc_ = masm()->pc_offset();
5396 }
5397 
5398 
DoLazyBailout(LLazyBailout * instr)5399 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5400   last_lazy_deopt_pc_ = masm()->pc_offset();
5401   DCHECK(instr->HasEnvironment());
5402   LEnvironment* env = instr->environment();
5403   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5404   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5405 }
5406 
5407 
DoDeoptimize(LDeoptimize * instr)5408 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5409   Deoptimizer::BailoutType type = instr->hydrogen()->type();
5410   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5411   // needed return address), even though the implementation of LAZY and EAGER is
5412   // now identical. When LAZY is eventually completely folded into EAGER, remove
5413   // the special case below.
5414   if (info()->IsStub() && type == Deoptimizer::EAGER) {
5415     type = Deoptimizer::LAZY;
5416   }
5417 
5418   DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
5419                Operand(zero_reg));
5420 }
5421 
5422 
DoDummy(LDummy * instr)5423 void LCodeGen::DoDummy(LDummy* instr) {
5424   // Nothing to see here, move on!
5425 }
5426 
5427 
DoDummyUse(LDummyUse * instr)5428 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5429   // Nothing to see here, move on!
5430 }
5431 
5432 
DoDeferredStackCheck(LStackCheck * instr)5433 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5434   PushSafepointRegistersScope scope(this);
5435   LoadContextFromDeferred(instr->context());
5436   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5437   RecordSafepointWithLazyDeopt(
5438       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5439   DCHECK(instr->HasEnvironment());
5440   LEnvironment* env = instr->environment();
5441   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5442 }
5443 
5444 
DoStackCheck(LStackCheck * instr)5445 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5446   class DeferredStackCheck final : public LDeferredCode {
5447    public:
5448     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5449         : LDeferredCode(codegen), instr_(instr) { }
5450     void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5451     LInstruction* instr() override { return instr_; }
5452 
5453    private:
5454     LStackCheck* instr_;
5455   };
5456 
5457   DCHECK(instr->HasEnvironment());
5458   LEnvironment* env = instr->environment();
5459   // There is no LLazyBailout instruction for stack-checks. We have to
5460   // prepare for lazy deoptimization explicitly here.
5461   if (instr->hydrogen()->is_function_entry()) {
5462     // Perform stack overflow check.
5463     Label done;
5464     __ LoadRoot(at, Heap::kStackLimitRootIndex);
5465     __ Branch(&done, hs, sp, Operand(at));
5466     DCHECK(instr->context()->IsRegister());
5467     DCHECK(ToRegister(instr->context()).is(cp));
5468     CallCode(isolate()->builtins()->StackCheck(),
5469              RelocInfo::CODE_TARGET,
5470              instr);
5471     __ bind(&done);
5472   } else {
5473     DCHECK(instr->hydrogen()->is_backwards_branch());
5474     // Perform stack overflow check if this goto needs it before jumping.
5475     DeferredStackCheck* deferred_stack_check =
5476         new(zone()) DeferredStackCheck(this, instr);
5477     __ LoadRoot(at, Heap::kStackLimitRootIndex);
5478     __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5479     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5480     __ bind(instr->done_label());
5481     deferred_stack_check->SetExit(instr->done_label());
5482     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5483     // Don't record a deoptimization index for the safepoint here.
5484     // This will be done explicitly when emitting call and the safepoint in
5485     // the deferred code.
5486   }
5487 }
5488 
5489 
DoOsrEntry(LOsrEntry * instr)5490 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5491   // This is a pseudo-instruction that ensures that the environment here is
5492   // properly registered for deoptimization and records the assembler's PC
5493   // offset.
5494   LEnvironment* environment = instr->environment();
5495 
5496   // If the environment were already registered, we would have no way of
5497   // backpatching it with the spill slot operands.
5498   DCHECK(!environment->HasBeenRegistered());
5499   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5500 
5501   GenerateOsrPrologue();
5502 }
5503 
5504 
DoForInPrepareMap(LForInPrepareMap * instr)5505 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5506   Register result = ToRegister(instr->result());
5507   Register object = ToRegister(instr->object());
5508 
5509   Label use_cache, call_runtime;
5510   DCHECK(object.is(a0));
5511   __ CheckEnumCache(&call_runtime);
5512 
5513   __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
5514   __ Branch(&use_cache);
5515 
5516   // Get the set of properties to enumerate.
5517   __ bind(&call_runtime);
5518   __ push(object);
5519   CallRuntime(Runtime::kForInEnumerate, instr);
5520   __ bind(&use_cache);
5521 }
5522 
5523 
DoForInCacheArray(LForInCacheArray * instr)5524 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5525   Register map = ToRegister(instr->map());
5526   Register result = ToRegister(instr->result());
5527   Label load_cache, done;
5528   __ EnumLength(result, map);
5529   __ Branch(&load_cache, ne, result, Operand(Smi::kZero));
5530   __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5531   __ jmp(&done);
5532 
5533   __ bind(&load_cache);
5534   __ LoadInstanceDescriptors(map, result);
5535   __ ld(result,
5536         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5537   __ ld(result,
5538         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5539   DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
5540                Operand(zero_reg));
5541 
5542   __ bind(&done);
5543 }
5544 
5545 
DoCheckMapValue(LCheckMapValue * instr)5546 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5547   Register object = ToRegister(instr->value());
5548   Register map = ToRegister(instr->map());
5549   __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5550   DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
5551                Operand(scratch0()));
5552 }
5553 
5554 
DoDeferredLoadMutableDouble(LLoadFieldByIndex * instr,Register result,Register object,Register index)5555 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5556                                            Register result,
5557                                            Register object,
5558                                            Register index) {
5559   PushSafepointRegistersScope scope(this);
5560   __ Push(object, index);
5561   __ mov(cp, zero_reg);
5562   __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5563   RecordSafepointWithRegisters(
5564      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5565   __ StoreToSafepointRegisterSlot(v0, result);
5566 }
5567 
5568 
DoLoadFieldByIndex(LLoadFieldByIndex * instr)5569 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5570   class DeferredLoadMutableDouble final : public LDeferredCode {
5571    public:
5572     DeferredLoadMutableDouble(LCodeGen* codegen,
5573                               LLoadFieldByIndex* instr,
5574                               Register result,
5575                               Register object,
5576                               Register index)
5577         : LDeferredCode(codegen),
5578           instr_(instr),
5579           result_(result),
5580           object_(object),
5581           index_(index) {
5582     }
5583     void Generate() override {
5584       codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5585     }
5586     LInstruction* instr() override { return instr_; }
5587 
5588    private:
5589     LLoadFieldByIndex* instr_;
5590     Register result_;
5591     Register object_;
5592     Register index_;
5593   };
5594 
5595   Register object = ToRegister(instr->object());
5596   Register index = ToRegister(instr->index());
5597   Register result = ToRegister(instr->result());
5598   Register scratch = scratch0();
5599 
5600   DeferredLoadMutableDouble* deferred;
5601   deferred = new(zone()) DeferredLoadMutableDouble(
5602       this, instr, result, object, index);
5603 
5604   Label out_of_object, done;
5605 
5606   __ And(scratch, index, Operand(Smi::FromInt(1)));
5607   __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
5608   __ dsra(index, index, 1);
5609 
5610   __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5611   __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
5612   __ Daddu(scratch, object, scratch);
5613   __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5614 
5615   __ Branch(&done);
5616 
5617   __ bind(&out_of_object);
5618   __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5619   // Index is equal to negated out of object property index plus 1.
5620   __ Dsubu(scratch, result, scratch);
5621   __ ld(result, FieldMemOperand(scratch,
5622                                 FixedArray::kHeaderSize - kPointerSize));
5623   __ bind(deferred->exit());
5624   __ bind(&done);
5625 }
5626 
5627 #undef __
5628 
5629 }  // namespace internal
5630 }  // namespace v8
5631