1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/crankshaft/mips64/lithium-codegen-mips64.h"
6 
7 #include "src/code-factory.h"
8 #include "src/code-stubs.h"
9 #include "src/crankshaft/hydrogen-osr.h"
10 #include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
11 #include "src/ic/ic.h"
12 #include "src/ic/stub-cache.h"
13 #include "src/profiler/cpu-profiler.h"
14 
15 namespace v8 {
16 namespace internal {
17 
18 
19 class SafepointGenerator final : public CallWrapper {
20  public:
SafepointGenerator(LCodeGen * codegen,LPointerMap * pointers,Safepoint::DeoptMode mode)21   SafepointGenerator(LCodeGen* codegen,
22                      LPointerMap* pointers,
23                      Safepoint::DeoptMode mode)
24       : codegen_(codegen),
25         pointers_(pointers),
26         deopt_mode_(mode) { }
~SafepointGenerator()27   virtual ~SafepointGenerator() {}
28 
BeforeCall(int call_size) const29   void BeforeCall(int call_size) const override {}
30 
AfterCall() const31   void AfterCall() const override {
32     codegen_->RecordSafepoint(pointers_, deopt_mode_);
33   }
34 
35  private:
36   LCodeGen* codegen_;
37   LPointerMap* pointers_;
38   Safepoint::DeoptMode deopt_mode_;
39 };
40 
41 
42 #define __ masm()->
43 
GenerateCode()44 bool LCodeGen::GenerateCode() {
45   LPhase phase("Z_Code generation", chunk());
46   DCHECK(is_unused());
47   status_ = GENERATING;
48 
49   // Open a frame scope to indicate that there is a frame on the stack.  The
50   // NONE indicates that the scope shouldn't actually generate code to set up
51   // the frame (that is done in GeneratePrologue).
52   FrameScope frame_scope(masm_, StackFrame::NONE);
53 
54   return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
55          GenerateJumpTable() && GenerateSafepointTable();
56 }
57 
58 
// Finalizes the generated Code object: records the optimized frame's
// stack-slot count and the safepoint table's offset, then attaches the
// deoptimization data collected during generation.
void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}
65 
66 
SaveCallerDoubles()67 void LCodeGen::SaveCallerDoubles() {
68   DCHECK(info()->saves_caller_doubles());
69   DCHECK(NeedsEagerFrame());
70   Comment(";;; Save clobbered callee double registers");
71   int count = 0;
72   BitVector* doubles = chunk()->allocated_double_registers();
73   BitVector::Iterator save_iterator(doubles);
74   while (!save_iterator.Done()) {
75     __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
76             MemOperand(sp, count * kDoubleSize));
77     save_iterator.Advance();
78     count++;
79   }
80 }
81 
82 
RestoreCallerDoubles()83 void LCodeGen::RestoreCallerDoubles() {
84   DCHECK(info()->saves_caller_doubles());
85   DCHECK(NeedsEagerFrame());
86   Comment(";;; Restore clobbered callee double registers");
87   BitVector* doubles = chunk()->allocated_double_registers();
88   BitVector::Iterator save_iterator(doubles);
89   int count = 0;
90   while (!save_iterator.Done()) {
91     __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
92             MemOperand(sp, count * kDoubleSize));
93     save_iterator.Advance();
94     count++;
95   }
96 }
97 
98 
// Emits the function prologue: profiler entry hook and --stop-at support,
// frame construction (stub or regular), reservation of spill slots
// (zap-filled under --debug-code), and saving of caller double registers
// when required.
bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    // --stop-at support: trap on entry to the named function.
    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      // Fill the reserved slots with kSlotsZapValue so that reads of
      // uninitialized slots are easy to spot.  a0 and a1 are preserved by
      // pushing them below the slot area; the 2 * kPointerSize offset in
      // the store compensates for those two pushed words.
      __ Dsubu(sp,  sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Daddu(a0, sp, Operand(slots *  kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Dsubu(a0, a0, Operand(kPointerSize));
      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}
152 
153 
// Emits the LPrologue instruction: allocates a local (heap) context when
// the function needs one and copies context-allocated parameters from the
// stack into the new context.
void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(a1);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in v0.  It replaces the context passed to us;
    // it's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    // Index -1 denotes the receiver when it is context-allocated.
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ld(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ sd(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          // No barrier was emitted; in debug mode verify the context really
          // is in new space as FastNewContextStub guarantees.
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}
214 
215 
// Emits the on-stack-replacement entry prologue, growing the frame so the
// optimized frame subsumes the unoptimized one.  Only emitted once.
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
}
229 
230 
GenerateBodyInstructionPre(LInstruction * instr)231 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
232   if (instr->IsCall()) {
233     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
234   }
235   if (!instr->IsLazyBailout() && !instr->IsGap()) {
236     safepoints_.BumpLastLazySafepointIndex();
237   }
238 }
239 
240 
// Emits every deferred code section collected during the main pass.  Each
// section gets a source-position record, a comment, an optional stub frame
// (built and destroyed around the section when no eager frame exists), and
// a jump back to its exit label in the main instruction stream.
bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      // Attribute the deferred code to the source position of the
      // instruction that created it.
      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build a minimal stub frame: cp/fp/ra plus a STUB marker.
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Daddu(fp, sp,
            Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        // Drop the STUB marker, then restore cp/fp/ra.
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
286 
287 
// Emits the deoptimization jump table.  Each entry loads an offset (relative
// to the first entry's address) into t9 and branch-and-links to a shared
// tail that adds the base address and jumps; entries that need a frame
// first build a stub frame via the shared needs_frame sequence.
bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    Label table_start, call_deopt_entry;

    __ bind(&table_start);
    Label needs_frame;
    // All offsets are computed relative to the first entry's address.
    Address base = jump_table_[0]->address;
    for (int i = 0; i < jump_table_.length(); i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ bind(&table_entry->label);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      if (is_int16(entry - base)) {
        // The offset fits in a 16-bit immediate, so the li fits in the
        // branch's delay slot.
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
          __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        } else {
          __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        }

      } else {
        // Wide offset: materialize it before the branch.
        __ li(t9, Operand(entry - base));
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
          __ BranchAndLink(&needs_frame);
        } else {
          __ BranchAndLink(&call_deopt_entry);
        }
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }
    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(at);
      __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // t9 holds the entry's offset from base; form the absolute address
    // and jump to the deoptimizer entry.
    __ li(at,
          Operand(reinterpret_cast<int64_t>(base), RelocInfo::RUNTIME_ENTRY));
    __ Daddu(t9, t9, Operand(at));
    __ Jump(t9);
  }
  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
361 
362 
// Emits the accumulated safepoint table at the end of the instruction
// stream; entries are encoded using the final stack-slot count.
bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
368 
369 
ToRegister(int index) const370 Register LCodeGen::ToRegister(int index) const {
371   return Register::from_code(index);
372 }
373 
374 
ToDoubleRegister(int index) const375 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
376   return DoubleRegister::from_code(index);
377 }
378 
379 
ToRegister(LOperand * op) const380 Register LCodeGen::ToRegister(LOperand* op) const {
381   DCHECK(op->IsRegister());
382   return ToRegister(op->index());
383 }
384 
385 
// Materializes the value of |op| in a GP register.  Register operands are
// returned directly; constants and stack slots are loaded into |scratch|,
// which is then returned.  Double constants are not supported and abort.
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      // Untagged int32 constant: load the raw numeric value.
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      // Doubles cannot live in a GP register.
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ld(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}
415 
416 
ToDoubleRegister(LOperand * op) const417 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
418   DCHECK(op->IsDoubleRegister());
419   return ToDoubleRegister(op->index());
420 }
421 
422 
// Materializes the value of |op| in an FPU register.  Double-register
// operands are returned directly; int32 constants are converted through
// |flt_scratch| into |dbl_scratch|; double stack slots are loaded into
// |dbl_scratch|.  Double/tagged immediates are unsupported and abort.
DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      // Move the int32 into the FPU and convert word -> double.
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}
452 
453 
ToHandle(LConstantOperand * op) const454 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
455   HConstant* constant = chunk_->LookupConstant(op);
456   DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
457   return constant->handle(isolate());
458 }
459 
460 
IsInteger32(LConstantOperand * op) const461 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
462   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
463 }
464 
465 
IsSmi(LConstantOperand * op) const466 bool LCodeGen::IsSmi(LConstantOperand* op) const {
467   return chunk_->LookupLiteralRepresentation(op).IsSmi();
468 }
469 
470 
ToInteger32(LConstantOperand * op) const471 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
472   // return ToRepresentation(op, Representation::Integer32());
473   HConstant* constant = chunk_->LookupConstant(op);
474   return constant->Integer32Value();
475 }
476 
477 
ToRepresentation_donotuse(LConstantOperand * op,const Representation & r) const478 int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
479                                             const Representation& r) const {
480   HConstant* constant = chunk_->LookupConstant(op);
481   int32_t value = constant->Integer32Value();
482   if (r.IsInteger32()) return value;
483   DCHECK(r.IsSmiOrTagged());
484   return reinterpret_cast<int64_t>(Smi::FromInt(value));
485 }
486 
487 
ToSmi(LConstantOperand * op) const488 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
489   HConstant* constant = chunk_->LookupConstant(op);
490   return Smi::FromInt(constant->Integer32Value());
491 }
492 
493 
ToDouble(LConstantOperand * op) const494 double LCodeGen::ToDouble(LConstantOperand* op) const {
495   HConstant* constant = chunk_->LookupConstant(op);
496   DCHECK(constant->HasDoubleValue());
497   return constant->DoubleValue();
498 }
499 
500 
ToOperand(LOperand * op)501 Operand LCodeGen::ToOperand(LOperand* op) {
502   if (op->IsConstantOperand()) {
503     LConstantOperand* const_op = LConstantOperand::cast(op);
504     HConstant* constant = chunk()->LookupConstant(const_op);
505     Representation r = chunk_->LookupLiteralRepresentation(const_op);
506     if (r.IsSmi()) {
507       DCHECK(constant->HasSmiValue());
508       return Operand(Smi::FromInt(constant->Integer32Value()));
509     } else if (r.IsInteger32()) {
510       DCHECK(constant->HasInteger32Value());
511       return Operand(constant->Integer32Value());
512     } else if (r.IsDouble()) {
513       Abort(kToOperandUnsupportedDoubleImmediate);
514     }
515     DCHECK(r.IsTagged());
516     return Operand(constant->handle(isolate()));
517   } else if (op->IsRegister()) {
518     return Operand(ToRegister(op));
519   } else if (op->IsDoubleRegister()) {
520     Abort(kToOperandIsDoubleRegisterUnimplemented);
521     return Operand((int64_t)0);
522   }
523   // Stack slots not implemented, use ToMemOperand instead.
524   UNREACHABLE();
525   return Operand((int64_t)0);
526 }
527 
528 
// Computes the sp-relative byte offset of an incoming argument when no
// eager frame has been built.  |index| is negative: -1 maps to offset 0,
// -2 to kPointerSize, and so on.
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}
533 
534 
ToMemOperand(LOperand * op) const535 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
536   DCHECK(!op->IsRegister());
537   DCHECK(!op->IsDoubleRegister());
538   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
539   if (NeedsEagerFrame()) {
540     return MemOperand(fp, StackSlotOffset(op->index()));
541   } else {
542     // Retrieve parameter without eager stack-frame relative to the
543     // stack-pointer.
544     return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
545   }
546 }
547 
548 
ToHighMemOperand(LOperand * op) const549 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
550   DCHECK(op->IsDoubleStackSlot());
551   if (NeedsEagerFrame()) {
552     // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
553     return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
554   } else {
555     // Retrieve parameter without eager stack-frame relative to the
556     // stack-pointer.
557     // return MemOperand(
558     //    sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
559     return MemOperand(
560         sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
561   }
562 }
563 
564 
// Recursively writes the deoptimization translation for |environment| and
// all of its outer environments (outermost first), then one translation
// command per value in this environment.
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  // Outer frames are translated before this one.
  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  // Shared counters threaded through AddToTranslation for captured-object
  // bookkeeping.
  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}
584 
585 
// Appends one translation command for |op|.  The materialization marker
// denotes a captured/arguments object whose fields follow at the end of
// the environment; those fields are translated recursively.  Otherwise the
// command is chosen from the operand's location (stack slot, register,
// constant) and its tagged/uint32 flags.
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    // Duplicated objects are emitted as a back-reference to the first
    // occurrence instead of being re-described.
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    // The object's field values live past the regular values, at
    // translation_size + dematerialized_index.
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    // Non-negative indices are above the frame header; shift past it.
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
660 
661 
// Convenience wrapper: calls |code| and records a simple safepoint.
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}
667 
668 
// Calls |code| and records a lazy-deopt safepoint for |instr| in the mode
// requested by |safepoint_mode|.
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}
677 
678 
// Calls the given runtime function and records a simple lazy-deopt
// safepoint for |instr|.  |save_doubles| controls whether FP registers
// are preserved across the call.
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
689 
690 
LoadContextFromDeferred(LOperand * context)691 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
692   if (context->IsRegister()) {
693     __ Move(cp, ToRegister(context));
694   } else if (context->IsStackSlot()) {
695     __ ld(cp, ToMemOperand(context));
696   } else if (context->IsConstantOperand()) {
697     HConstant* constant =
698         chunk_->LookupConstant(LConstantOperand::cast(context));
699     __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
700   } else {
701     UNREACHABLE();
702   }
703 }
704 
705 
// Runtime call helper for deferred code: restores cp from |context|, calls
// the runtime function with doubles saved, and records a register
// safepoint (no lazy deopt inside deferred code).
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
715 
716 
// Registers |environment| for deoptimization if it has not been registered
// yet: writes its translation (including all outer frames) and assigns it
// a deoptimization index.  For lazy deopts the current pc offset is
// recorded as well.
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    // Count total frames and JS frames along the outer chain; both counts
    // size the Translation.
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
752 
753 
// Emits a deoptimization check: if |condition| holds for (src1, src2) the
// code deoptimizes to the entry for |instr|'s environment using
// |bailout_type|.  Unconditional deopts from a built frame call the entry
// directly; everything else goes through the shared jump table.
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  // --deopt-every-n-times support: decrement the stress counter and force
  // an unconditional deopt when it reaches zero.
  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    // Counter hit zero: reset it and deopt unconditionally.
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  // --trap-on-deopt support: break instead of deopting when the condition
  // holds.
  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last()->label, condition, src1, src2);
  }
}
821 
822 
DeoptimizeIf(Condition condition,LInstruction * instr,Deoptimizer::DeoptReason deopt_reason,Register src1,const Operand & src2)823 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
824                             Deoptimizer::DeoptReason deopt_reason,
825                             Register src1, const Operand& src2) {
826   Deoptimizer::BailoutType bailout_type = info()->IsStub()
827       ? Deoptimizer::LAZY
828       : Deoptimizer::EAGER;
829   DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
830 }
831 
832 
RecordSafepointWithLazyDeopt(LInstruction * instr,SafepointMode safepoint_mode)833 void LCodeGen::RecordSafepointWithLazyDeopt(
834     LInstruction* instr, SafepointMode safepoint_mode) {
835   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
836     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
837   } else {
838     DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
839     RecordSafepointWithRegisters(
840         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
841   }
842 }
843 
844 
RecordSafepoint(LPointerMap * pointers,Safepoint::Kind kind,int arguments,Safepoint::DeoptMode deopt_mode)845 void LCodeGen::RecordSafepoint(
846     LPointerMap* pointers,
847     Safepoint::Kind kind,
848     int arguments,
849     Safepoint::DeoptMode deopt_mode) {
850   DCHECK(expected_safepoint_kind_ == kind);
851 
852   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
853   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
854       kind, arguments, deopt_mode);
855   for (int i = 0; i < operands->length(); i++) {
856     LOperand* pointer = operands->at(i);
857     if (pointer->IsStackSlot()) {
858       safepoint.DefinePointerSlot(pointer->index(), zone());
859     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
860       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
861     }
862   }
863 }
864 
865 
RecordSafepoint(LPointerMap * pointers,Safepoint::DeoptMode deopt_mode)866 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
867                                Safepoint::DeoptMode deopt_mode) {
868   RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
869 }
870 
871 
RecordSafepoint(Safepoint::DeoptMode deopt_mode)872 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
873   LPointerMap empty_pointers(zone());
874   RecordSafepoint(&empty_pointers, deopt_mode);
875 }
876 
877 
RecordSafepointWithRegisters(LPointerMap * pointers,int arguments,Safepoint::DeoptMode deopt_mode)878 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
879                                             int arguments,
880                                             Safepoint::DeoptMode deopt_mode) {
881   RecordSafepoint(
882       pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
883 }
884 
885 
RecordAndWritePosition(int position)886 void LCodeGen::RecordAndWritePosition(int position) {
887   if (position == RelocInfo::kNoPosition) return;
888   masm()->positions_recorder()->RecordPosition(position);
889   masm()->positions_recorder()->WriteRecordedPositions();
890 }
891 
892 
LabelType(LLabel * label)893 static const char* LabelType(LLabel* label) {
894   if (label->is_loop_header()) return " (loop header)";
895   if (label->is_osr_entry()) return " (OSR entry)";
896   return "";
897 }
898 
899 
// Emits a basic-block label: writes a codegen comment identifying the block,
// binds the label so branches can target it, updates the current block id,
// and emits the gap moves attached to the label.
void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}
910 
911 
// Resolves and emits a parallel move by delegating to the gap resolver.
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}
915 
916 
DoGap(LGap * gap)917 void LCodeGen::DoGap(LGap* gap) {
918   for (int i = LGap::FIRST_INNER_POSITION;
919        i <= LGap::LAST_INNER_POSITION;
920        i++) {
921     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
922     LParallelMove* move = gap->GetParallelMove(inner_pos);
923     if (move != NULL) DoParallelMove(move);
924   }
925 }
926 
927 
// An instruction gap is just a gap; emit its parallel moves.
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}
931 
932 
// Parameters are already in their expected locations on entry, so no code
// needs to be emitted here.
void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
936 
937 
// Calls a code stub selected by the instruction's major key.  Per the stub
// calling convention, the context must be in cp and the result lands in v0.
void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      // Only the stub kinds handled above are expected to reach LCallStub.
      UNREACHABLE();
  }
}
956 
957 
// OSR values are materialized by the OSR prologue; emit it here.
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}
961 
962 
// Computes dividend % divisor, where |divisor| is a power of two, in place in
// the dividend register (which doubles as the result register).
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  // abs(divisor) - 1, used to mask out the remainder bits.
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    // Negate, mask, then negate again so the remainder keeps the sign of the
    // dividend.
    __ dsubu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero remainder with a negative dividend would be -0; deopt.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    // The final negation executes in the branch delay slot.
    __ Branch(USE_DELAY_SLOT, &done);
    __ dsubu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}
995 
996 
// Computes dividend % divisor for an arbitrary constant divisor using the
// magic-number reciprocal multiplication in TruncatingDiv.
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    // x % 0 always deopts (the JS result would be NaN).
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // result = dividend - (dividend / |divisor|) * |divisor|.  The remainder
  // takes the sign of the dividend, so the divisor's sign is irrelevant.
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Dmul(result, result, Operand(Abs(divisor)));
  __ Dsubu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    // A zero remainder with a negative dividend is -0; deopt.
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}
1022 
1023 
// Computes left % right for two register operands, deoptimizing on the cases
// that cannot be represented as an int32 result (x % 0, and -0 when the
// instruction cares about minus zero).
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Dmod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0, we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
    } else {
      // kMinInt % -1 == 0 when truncation is fine; the mov executes in the
      // branch delay slot.
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));

  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}
1065 
1066 
// Computes dividend / divisor where |divisor| is a power of two (or kMinInt),
// using an arithmetic shift with a rounding correction for negative dividends.
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    // Any bit set under the mask means a non-zero remainder.
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Dsubu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    // Add the sign bit so that the shift below rounds toward zero for
    // negative dividends.
    __ dsrl32(result, dividend, 31);
    __ Daddu(result, dividend, Operand(result));
  } else {
    // Shift by 63 yields 0 or all-ones; the logical shift then leaves the low
    // |shift| bits, i.e. 2^shift - 1 for negative dividends, so the arithmetic
    // shift below rounds toward zero.
    __ dsra32(result, dividend, 31);
    __ dsrl32(result, result, 32 - shift);
    __ Daddu(result, dividend, Operand(result));
  }
  if (shift > 0) __ dsra(result, result, shift);
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
}
1110 
1111 
// Computes dividend / divisor for an arbitrary constant divisor using the
// magic-number reciprocal multiplication in TruncatingDiv.
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    // Division by the constant 0 always deopts.
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Divide by |divisor|, then fix the sign of the quotient.
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deopt unless the division was exact: result * divisor must reproduce
    // the dividend.
    __ Dmul(scratch0(), result, Operand(divisor));
    __ Dsubu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}
1140 
1141 
1142 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
// Computes dividend / divisor for two register operands, deoptimizing on
// x / 0, on (0 / -x) when -0 matters, on kMinInt / -1 overflow, and on an
// inexact division when the result is not truncated.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Calculate remainder.
    Register remainder = ToRegister(instr->temp());
    if (kArchVariant != kMips64r6) {
      // Pre-r6: the remainder of the earlier div is still in the hi register.
      __ mfhi(remainder);
    } else {
      // r6 dropped hi/lo; compute the remainder explicitly.
      __ dmod(remainder, dividend, divisor);
    }
    // A non-zero remainder means the quotient is inexact; deopt.
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}
1189 
1190 
DoMultiplyAddD(LMultiplyAddD * instr)1191 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1192   DoubleRegister addend = ToDoubleRegister(instr->addend());
1193   DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1194   DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1195 
1196   // This is computed in-place.
1197   DCHECK(addend.is(ToDoubleRegister(instr->result())));
1198 
1199   __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
1200 }
1201 
1202 
// Computes floor(dividend / divisor) where |divisor| is a power of two.
// Positive divisors reduce to an arithmetic shift; negative divisors require
// negation plus overflow and minus-zero handling.
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ dsra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // Dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Dsubu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // 0 / negative-divisor would be -0; deopt.
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  }

  // scratch's sign bit now tells whether the negation overflowed.
  __ Xor(scratch, scratch, result);
  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ dsra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  // Negation overflowed (dividend was kMinInt): load the exact quotient as a
  // constant of fixed size so branch offsets stay stable.
  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
  __ Branch(&done);
  __ bind(&no_overflow);
  __ dsra(result, result, shift);
  __ bind(&done);
}
1257 
1258 
// Computes floor(dividend / divisor) for an arbitrary constant divisor.  When
// signs can disagree, the truncating division is adjusted before and after to
// yield the flooring result.
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    // Division by the constant 0 always deopts.
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Dsubu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  // Adjustment is needed exactly when dividend and divisor have opposite
  // signs (truncation rounds toward zero, flooring rounds down).
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  // Compute (dividend +/- 1) / divisor - 1, which equals the floored result.
  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}
1303 
1304 
1305 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
// Computes floor(dividend / divisor) for two register operands: a truncating
// division followed by a -1 correction when the remainder is non-zero and the
// operands have opposite signs.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Ddiv(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
  Label done;
  Register remainder = scratch0();
  if (kArchVariant != kMips64r6) {
    // Pre-r6: remainder of the earlier div is still in hi.
    __ mfhi(remainder);
  } else {
    // r6 dropped hi/lo; compute the remainder explicitly.
    __ dmod(remainder, dividend, divisor);
  }
  // No correction needed for an exact division; the Xor below executes in the
  // branch delay slot and flips remainder's sign bit iff signs disagree.
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}
1354 
1355 
// Multiplies two Smi operands.  Constant right operands use shift/add
// strength reduction; register operands use a full multiply with optional
// overflow and minus-zero checks.
void LCodeGen::DoMulS(LMulS* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The zero constant is handled separately in the switch below.
      // If the constant is negative and left is zero, the result would be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          // Negation overflows only for kMinInt; scratch < 0 signals it.
          __ DsubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
                       Operand(zero_reg));
        } else {
          __ Dsubu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ dsll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ dsll(scratch, left, shift);
          __ Daddu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ dsll(scratch, left, shift);
          __ Dsubu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Dmul(result, left, at);
        }
    }
  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      // Deopt unless the high half is the sign-extension of the low half.
      __ Dmulh(result, left, right);
      __ dsra32(scratch, result, 0);
      __ sra(at, result, 31);
      __ SmiTag(result);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      __ SmiUntag(result, left);
      __ dmul(result, result, right);
    }

    if (bailout_on_minus_zero) {
      Label done;
      // Signs agree (xor >= 0): the result cannot be -0.
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}
1456 
1457 
// Multiplies two int32 operands.  Constant right operands use shift/add
// strength reduction; register operands use a full multiply with optional
// overflow and minus-zero checks.
void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The zero constant is handled separately in the switch below.
      // If the constant is negative and left is zero, the result would be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          // Negation overflows only for kMinInt; scratch < 0 signals it.
          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
                       Operand(zero_reg));
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ sll(scratch, left, shift);
          __ addu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      // Deopt unless the high half is the sign-extension of the low half.
      __ Dmul(result, left, right);
      __ dsra32(scratch, result, 0);
      __ sra(at, result, 31);

      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      __ mul(result, left, right);
    }

    if (bailout_on_minus_zero) {
      Label done;
      // Signs agree (xor >= 0): the result cannot be -0.
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}
1558 
1559 
// Emits the bitwise AND/OR/XOR of left and a register, constant, or
// stack-slot right operand into result.
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    // Stack-slot operands are first loaded into the at register.
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      // x ^ ~0 (i.e. bitwise NOT) is emitted as a single nor.
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}
1594 
1595 
// Emit a 32-bit shift/rotate (ROR, SAR, SHR, SHL) with either a register
// or a constant shift amount. SHR may deoptimize when the unsigned result
// does not fit in a non-negative int32.
void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
           // TODO(yy): (-1) >>> 0. anything else?
          // srlv sign-extends its 32-bit result on MIPS64, so a logical-shift
          // result with bit 31 set appears negative here; deopt if the value
          // is outside [0, kMaxInt] and thus not representable as int32.
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(zero_reg));
          DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(kMaxInt));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          // Shift count 0: x >>> 0 only changes the value when x is negative
          // (the result would be an unrepresentable uint32), so deopt on a
          // set sign bit instead of emitting a shift.
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
                         Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          // Smi values carry the payload in the upper 32 bits, hence the
          // 64-bit shift for the Smi representation.
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ dsll(result, left, shift_count);
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
1678 
1679 
// Emit a Smi (64-bit) subtraction, with an overflow deopt check when the
// hydrogen instruction can overflow.
void LCodeGen::DoSubS(LSubS* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    // 'overflow' is set negative on overflow; deopt in that case.
    __ DsubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
                                ToOperand(right), overflow, scratch);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
                 Operand(zero_reg));
  }
}
1699 
1700 
// Emit a 32-bit integer subtraction, with an overflow deopt check when the
// hydrogen instruction can overflow.
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    // 'overflow' is set negative on overflow; deopt in that case.
    __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
                               ToOperand(right), overflow, scratch);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
                 Operand(zero_reg));
  }
}
1720 
1721 
// Load a 32-bit integer constant into the result register.
void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}
1725 
1726 
// Load a Smi constant into the result register.
void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}
1730 
1731 
// Load a double constant into the result FPU register.
void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}
1738 
1739 
// Load an external-reference constant into the result register.
void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}
1743 
1744 
// Load a tagged heap-object constant into the result register. The handle
// may be deferred, so dereferencing it here needs an explicit scope.
void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ li(ToRegister(instr->result()), object);
}
1750 
1751 
// Extract the enum-cache length from a map into the result register.
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}
1757 
1758 
// Build a MemOperand addressing character 'index' of a sequential string.
// For a constant index the offset is folded into the operand directly;
// otherwise the (possibly scaled) index is added to the string pointer in
// scratch0(). Two-byte strings scale the index by kUC16Size.
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  DCHECK(!scratch.is(string));
  DCHECK(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Daddu(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    // Scale the index by 2 for two-byte characters.
    __ dsll(scratch, ToRegister(index), 1);
    __ Daddu(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}
1782 
1783 
// Load one character from a sequential string into the result register,
// using a byte or half-word load depending on the encoding. With
// --debug-code, first verify the string's instance type matches.
void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    // Keep only the representation and encoding bits of the instance type.
    __ And(scratch, scratch,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ lbu(result, operand);
  } else {
    __ lhu(result, operand);
  }
}
1810 
1811 
// Store one character into a sequential string, using a byte or half-word
// store depending on the encoding. With --debug-code, first verify the
// string type, index, and value via EmitSeqStringSetCharCheck.
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ sb(value, operand);
  } else {
    __ sh(value, operand);
  }
}
1835 
1836 
// Emit a 64-bit addition for external-pointer arithmetic. This variant is
// only used when overflow is impossible, hence the DCHECK.
void LCodeGen::DoAddE(LAddE* instr) {
  LOperand* result = instr->result();
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  DCHECK(right->IsRegister() || right->IsConstantOperand());
  __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
}
1846 
1847 
// Emit a Smi (64-bit) addition, with an overflow deopt check when the
// hydrogen instruction can overflow.
void LCodeGen::DoAddS(LAddS* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    // 'overflow' is set negative on overflow; deopt in that case.
    __ DadduAndCheckForOverflow(ToRegister(result), ToRegister(left),
                                ToOperand(right), overflow, scratch);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
                 Operand(zero_reg));
  }
}
1867 
1868 
// Emit a 32-bit integer addition, with an overflow deopt check when the
// hydrogen instruction can overflow.
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    // 'overflow' is set negative on overflow; deopt in that case.
    __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
                               ToOperand(right), overflow, scratch);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
                 Operand(zero_reg));
  }
}
1888 
1889 
// Emit Math.min/Math.max. Integers use a branchless Slt + Movz/Movn select.
// Doubles follow JS semantics: NaN propagates, and min/max of +-0 picks the
// correctly signed zero via an arithmetic trick (see check_zero below).
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Register left_reg = ToRegister(left);
    Register right_reg = EmitLoadRegister(right, scratch0());
    Register result_reg = ToRegister(instr->result());
    Label return_right, done;
    Register scratch = scratch1();
    // scratch = (left < right) ? 1 : 0; then conditionally move the winner.
    __ Slt(scratch, left_reg, Operand(right_reg));
    if (condition == ge) {
     __  Movz(result_reg, left_reg, scratch);
     __  Movn(result_reg, right_reg, scratch);
    } else {
     DCHECK(condition == le);
     __  Movn(result_reg, left_reg, scratch);
     __  Movz(result_reg, right_reg, scratch);
    }
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    // Equal operands need the +-0 check; unordered means a NaN is involved.
    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
    __ Branch(&return_right);

    __ bind(&check_zero);
    // left == right != 0.
    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // min: compute -((-left) - right), which equals left + right but
      // yields -0 whenever either operand is -0.
      __ neg_d(left_reg, left_reg);
      __ sub_d(result_reg, left_reg, right_reg);
      __ neg_d(result_reg, result_reg);
    } else {
      // max: left + right yields +0 unless both operands are -0.
      __ add_d(result_reg, left_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&check_nan_left);
    // left == NaN.
    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ mov_d(result_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ mov_d(result_reg, left_reg);
    }
    __ bind(&done);
  }
}
1949 
1950 
// Emit a double-precision arithmetic operation. MOD is implemented via a
// C call to the runtime's fmod helper, preserving a0-a3 around the call.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      // 0 integer args, 2 double args.
      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(result);

      // Restore saved register.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
1990 
1991 
// Emit a call to the generic BinaryOpIC stub for tagged (non-specialized)
// arithmetic. Inputs/output are fixed to a1/a0/v0 by the calling convention.
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  DCHECK(ToRegister(instr->result()).is(v0));

  Handle<Code> code =
      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  // Other arch use a nop here, to signal that there is no inlined
  // patchable code. Mips does not need the nop, since our marker
  // instruction (andi zero_reg) will never be used in normal code.
}
2005 
2006 
// Emit a conditional branch to the instruction's true/false blocks,
// eliding whichever branch falls through to the next emitted block.
// 'al' (always) degenerates to an unconditional goto to the true block.
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          Condition condition,
                          Register src1,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // True block falls through; branch to false block on the negation.
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    // False block falls through; branch to true block on the condition.
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
  } else {
    // Neither falls through; branch to true, then jump to false.
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}
2028 
2029 
// Floating-point counterpart of EmitBranch: branch on an FPU comparison to
// the instruction's true/false blocks, eliding a fall-through branch.
template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
                           Condition condition,
                           FPURegister src1,
                           FPURegister src2) {
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // True block falls through; branch to false block on the FPU negation.
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateFpuCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    // False block falls through; branch to true block on the condition.
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
  } else {
    // Neither falls through; branch to true, then jump to false.
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}
2053 
2054 
// Branch to the instruction's true block when the condition holds;
// otherwise fall through.
template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
                              Register src1, const Operand& src2) {
  int true_block = instr->TrueDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
}
2061 
2062 
// Branch to the instruction's false block when the condition holds;
// otherwise fall through.
template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
                               Register src1, const Operand& src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
}
2069 
2070 
// Branch to the instruction's false block when the FPU condition holds;
// otherwise fall through.
template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
                                Condition condition,
                                FPURegister src1,
                                FPURegister src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
             condition, src1, src2);
}
2080 
2081 
// Emit a breakpoint instruction for the debugger.
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}
2085 
2086 
// Emit the ToBoolean branch. For statically known representations/types a
// single compare-and-branch suffices; for generic tagged values, test each
// type the instruction has previously observed (undefined, boolean, null,
// smi, undetectable, spec object, string, symbol, SIMD, heap number) and
// deopt if an unobserved kind shows up.
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    // Any non-zero integer/smi is truthy.
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranch(instr, ogl, reg, kDoubleRegZero);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      // JSArrays are always truthy; branch unconditionally.
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      DoubleRegister dbl_scratch = double_scratch0();
      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      // A string is truthy iff its length is non-zero.
      __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
      EmitBranch(instr, ne, at, Operand(zero_reg));
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg, at);
        DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_),
                  ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
        __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
      }

      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
        // SIMD value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch,
                  Operand(SIMD128_VALUE_TYPE));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        // ne takes the true label, NaN (unordered) takes the false label.
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                   ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
                     Operand(zero_reg));
      }
    }
  }
}
2222 
2223 
// Jump to the given block, unless it is the next one to be emitted (in
// which case execution simply falls through).
void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}
2229 
2230 
// Emit an unconditional jump to the goto's target block.
void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}
2234 
2235 
TokenToCondition(Token::Value op,bool is_unsigned)2236 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2237   Condition cond = kNoCondition;
2238   switch (op) {
2239     case Token::EQ:
2240     case Token::EQ_STRICT:
2241       cond = eq;
2242       break;
2243     case Token::NE:
2244     case Token::NE_STRICT:
2245       cond = ne;
2246       break;
2247     case Token::LT:
2248       cond = is_unsigned ? lo : lt;
2249       break;
2250     case Token::GT:
2251       cond = is_unsigned ? hi : gt;
2252       break;
2253     case Token::LTE:
2254       cond = is_unsigned ? ls : le;
2255       break;
2256     case Token::GTE:
2257       cond = is_unsigned ? hs : ge;
2258       break;
2259     case Token::IN:
2260     case Token::INSTANCEOF:
2261     default:
2262       UNREACHABLE();
2263   }
2264   return cond;
2265 }
2266 
2267 
// Emit a numeric comparison branch. Constant-vs-constant comparisons are
// folded at compile time; double comparisons route NaN to the false block
// first; integer/smi comparisons keep constants on the right, commuting
// the condition when the constant was originally on the left.
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to false block label.
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);

      EmitBranchF(instr, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand((int64_t)0);
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          // Smi representation: compare against the tagged constant.
          cmp_left = ToRegister(left);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(left);
          cmp_right = Operand(value);
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(right);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(right);
          cmp_right = Operand(value);
        }
        // We commuted the operands, so commute the condition.
        cond = CommuteCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(instr, cond, cmp_left, cmp_right);
    }
  }
}
2328 
2329 
// Branch on pointer identity of two tagged values.
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  EmitBranch(instr, eq, left, Operand(right));
}
2336 
2337 
// Branch if the value is "the hole". Tagged values compare against the
// the_hole sentinel object; doubles are recognized as the hole NaN by its
// distinctive upper 32 bits (any other NaN takes the false branch first).
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));
    return;
  }

  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  // A non-NaN double can't be the hole: NaN != NaN, so 'eq' fails only
  // for NaNs, and those fall through to the bit-pattern check below.
  EmitFalseBranchF(instr, eq, input_reg, input_reg);

  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
  EmitBranch(instr, eq, scratch,
             Operand(static_cast<int32_t>(kHoleNanUpper32)));
}
2354 
2355 
// Branch if the value is exactly -0. Doubles check for a zero value whose
// sign bit is set; tagged values must be heap numbers with exponent word
// 0x80000000 and mantissa word 0.
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  DCHECK(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    // Not even zero -> definitely not -0.
    EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
    __ FmoveHigh(scratch, value);
    // Only use low 32-bits of value.
    __ dsll32(scratch, scratch, 0);
    __ dsrl32(scratch, scratch, 0);
    // -0 has the sign bit set in the upper word.
    __ li(at, 0x80000000);
  } else {
    Register value = ToRegister(instr->value());
    __ CheckMap(value,
                scratch,
                Heap::kHeapNumberMapRootIndex,
                instr->FalseLabel(chunk()),
                DO_SMI_CHECK);
    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
    // Mantissa word must be zero; compare it against zero below.
    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
    __ mov(at, zero_reg);
  }
  EmitBranch(instr, eq, scratch, Operand(at));
}
2383 
2384 
// Checks whether |input| is a string.  Smis jump to |is_not_string| (when
// |check_needed| requests the inline smi check); otherwise the object's
// instance type is left in |temp1| and the returned condition holds the
// protocol with the caller: the value is a string iff temp1 is |lt| the
// caller's FIRST_NONSTRING_TYPE bound.
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ GetObjectType(input, temp1, temp1);

  return lt;
}
2396 
2397 
DoIsStringAndBranch(LIsStringAndBranch * instr)2398 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2399   Register reg = ToRegister(instr->value());
2400   Register temp1 = ToRegister(instr->temp());
2401 
2402   SmiCheck check_needed =
2403       instr->hydrogen()->value()->type().IsHeapObject()
2404           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2405   Condition true_cond =
2406       EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2407 
2408   EmitBranch(instr, true_cond, temp1,
2409              Operand(FIRST_NONSTRING_TYPE));
2410 }
2411 
2412 
DoIsSmiAndBranch(LIsSmiAndBranch * instr)2413 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2414   Register input_reg = EmitLoadRegister(instr->value(), at);
2415   __ And(at, input_reg, kSmiTagMask);
2416   EmitBranch(instr, eq, at, Operand(zero_reg));
2417 }
2418 
2419 
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  // Smis are never undetectable; the check is omitted when the type system
  // already rules them out.
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  // Test the "is undetectable" bit in the object's map bit field.
  __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne, at, Operand(zero_reg));
}
2432 
2433 
ComputeCompareCondition(Token::Value op)2434 static Condition ComputeCompareCondition(Token::Value op) {
2435   switch (op) {
2436     case Token::EQ_STRICT:
2437     case Token::EQ:
2438       return eq;
2439     case Token::LT:
2440       return lt;
2441     case Token::GT:
2442       return gt;
2443     case Token::LTE:
2444       return le;
2445     case Token::GTE:
2446       return ge;
2447     default:
2448       UNREACHABLE();
2449       return kNoCondition;
2450   }
2451 }
2452 
2453 
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));

  // The StringCompare stub takes its operands in a1/a0 and leaves a value
  // in v0 whose comparison against zero reflects the string ordering.
  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);

  EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
             Operand(zero_reg));
}
2465 
2466 
TestType(HHasInstanceTypeAndBranch * instr)2467 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2468   InstanceType from = instr->from();
2469   InstanceType to = instr->to();
2470   if (from == FIRST_TYPE) return to;
2471   DCHECK(from == to || to == LAST_TYPE);
2472   return from;
2473 }
2474 
2475 
BranchCondition(HHasInstanceTypeAndBranch * instr)2476 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2477   InstanceType from = instr->from();
2478   InstanceType to = instr->to();
2479   if (from == to) return eq;
2480   if (to == LAST_TYPE) return hs;
2481   if (from == FIRST_TYPE) return ls;
2482   UNREACHABLE();
2483   return eq;
2484 }
2485 
2486 
DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch * instr)2487 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2488   Register scratch = scratch0();
2489   Register input = ToRegister(instr->value());
2490 
2491   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2492     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2493   }
2494 
2495   __ GetObjectType(input, scratch, scratch);
2496   EmitBranch(instr,
2497              BranchCondition(instr->hydrogen()),
2498              scratch,
2499              Operand(TestType(instr->hydrogen())));
2500 }
2501 
2502 
DoGetCachedArrayIndex(LGetCachedArrayIndex * instr)2503 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2504   Register input = ToRegister(instr->value());
2505   Register result = ToRegister(instr->result());
2506 
2507   __ AssertString(input);
2508 
2509   __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
2510   __ IndexFromHash(result, result);
2511 }
2512 
2513 
DoHasCachedArrayIndexAndBranch(LHasCachedArrayIndexAndBranch * instr)2514 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2515     LHasCachedArrayIndexAndBranch* instr) {
2516   Register input = ToRegister(instr->value());
2517   Register scratch = scratch0();
2518 
2519   __ lwu(scratch,
2520          FieldMemOperand(input, String::kHashFieldOffset));
2521   __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2522   EmitBranch(instr, eq, at, Operand(zero_reg));
2523 }
2524 
2525 
2526 // Branches to a label or falls through with the answer in flags.  Trashes
2527 // the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String>class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));

  // Smis have no class; they always fail the test.
  __ JumpIfSmi(input, is_false);

  // Functions have class 'Function': that case is fully decided by the
  // instance type, one way or the other.
  __ GetObjectType(input, temp, temp2);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  Register instance_type = scratch1();
  DCHECK(!instance_type.is(temp));
  __ GetMapConstructor(temp, temp, temp2, instance_type);

  // Objects with a non-function constructor have class 'Object'.
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ ld(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.

  // End with the address of this class_name instance in temp register.
  // On MIPS, the caller must do the comparison with Handle<String>class_name.
}
2575 
2576 
DoClassOfTestAndBranch(LClassOfTestAndBranch * instr)2577 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2578   Register input = ToRegister(instr->value());
2579   Register temp = scratch0();
2580   Register temp2 = ToRegister(instr->temp());
2581   Handle<String> class_name = instr->hydrogen()->class_name();
2582 
2583   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2584                   class_name, input, temp, temp2);
2585 
2586   EmitBranch(instr, eq, temp, Operand(class_name));
2587 }
2588 
2589 
DoCmpMapAndBranch(LCmpMapAndBranch * instr)2590 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2591   Register reg = ToRegister(instr->value());
2592   Register temp = ToRegister(instr->temp());
2593 
2594   __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2595   EmitBranch(instr, eq, temp, Operand(instr->map()));
2596 }
2597 
2598 
DoInstanceOf(LInstanceOf * instr)2599 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2600   DCHECK(ToRegister(instr->context()).is(cp));
2601   Label true_label, done;
2602   DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2603   DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2604   DCHECK(ToRegister(instr->result()).is(v0));
2605 
2606   InstanceOfStub stub(isolate());
2607   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2608 }
2609 
2610 
void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = scratch0();
  Register const object_instance_type = scratch1();
  // object_prototype aliases object_map: once the prototype is loaded the
  // current map is no longer needed.
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object.  It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ SmiTst(object, at);
    EmitFalseBranch(instr, eq, at, Operand(zero_reg));
  }

  // Loop through the {object}s prototype chain looking for the {prototype}.
  __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ lbu(object_instance_type,
         FieldMemOperand(object_map, Map::kBitFieldOffset));
  __ And(object_instance_type, object_instance_type,
         Operand(1 << Map::kIsAccessCheckNeeded));
  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
               Operand(zero_reg));
  // Deoptimize for proxies, which need runtime handling.
  __ lbu(object_instance_type,
         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
  DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
               Operand(JS_PROXY_TYPE));

  // Found the prototype => true; hit null => end of chain, false.
  __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
  __ LoadRoot(at, Heap::kNullValueRootIndex);
  EmitFalseBranch(instr, eq, object_prototype, Operand(at));
  // Loop back; the next map load executes in the branch delay slot.
  __ Branch(&loop, USE_DELAY_SLOT);
  __ ld(object_map, FieldMemOperand(object_prototype,
                                    HeapObject::kMapOffset));  // In delay slot.
}
2652 
2653 
void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  // The CompareIC returns its result in v0, to be compared against zero.
  Handle<Code> ic =
      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done, check;
  // The true-value LoadRoot sits in the branch delay slot, so it executes
  // on both paths; the fall-through path then overwrites the result with
  // the false value.  The DCHECK pins the one-instruction assumption.
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ bind(&check);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ bind(&done);
}
2675 
2676 
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0. We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(v0);
    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  // Restore callee-saved doubles before the frame goes away.
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  // Tear down the frame: reset sp to fp, then restore ra and the caller fp.
  if (NeedsEagerFrame()) {
    __ mov(sp, fp);
    __ Pop(ra, fp);
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    // +1 pops the receiver along with the parameters.
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ Daddu(sp, sp, Operand(sp_delta));
    }
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi
    __ SmiUntag(reg);
    __ dsll(at, reg, kPointerSizeLog2);
    __ Daddu(sp, sp, at);
  }

  __ Jump(ra);
}
2711 
2712 
// Loads the type feedback vector and the smi-encoded slot index into the
// fixed registers the load IC calling convention expects.
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
  DCHECK(slot_register.is(a0));

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ li(vector_register, vector);
  // No need to allocate this register.
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ li(slot_register, Operand(Smi::FromInt(index)));
}
2728 
2729 
2730 template <class T>
EmitVectorStoreICRegisters(T * instr)2731 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2732   Register vector_register = ToRegister(instr->temp_vector());
2733   Register slot_register = ToRegister(instr->temp_slot());
2734 
2735   AllowDeferredHandleDereference vector_structure_check;
2736   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2737   __ li(vector_register, vector);
2738   FeedbackVectorSlot slot = instr->hydrogen()->slot();
2739   int index = vector->GetIndex(slot);
2740   __ li(slot_register, Operand(Smi::FromInt(index)));
2741 }
2742 
2743 
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->global_object())
            .is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(v0));

  // Set up name, feedback vector and slot, then call the load IC; the
  // result comes back in v0.
  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
                                         SLOPPY, PREMONOMORPHIC).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
2757 
2758 
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ ld(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    // A hole either deoptimizes (checked let/const) or is converted to
    // undefined, depending on the hydrogen instruction.
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}
2777 
2778 
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    // Inspect the current slot value: a hole either deoptimizes or makes
    // the store a no-op, depending on the hydrogen instruction.
    __ ld(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sd(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // The smi check inside the write barrier can be skipped when the value
    // is statically known to be a heap object.
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch0(),
                              GetRAState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}
2815 
2816 
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());
  // External memory: plain (untagged) load from object + offset.
  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    __ Load(result, operand, access.representation());
    return;
  }

  // Unboxed double field: load directly into an FPU register.
  if (instr->hydrogen()->representation().IsDouble()) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ ldc1(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  // Out-of-object properties live in the properties backing store.
  if (!access.IsInobject()) {
    __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }

  Representation representation = access.representation();
  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      instr->hydrogen()->representation().IsInteger32()) {
    if (FLAG_debug_code) {
      // Verify this is really an Smi.
      Register scratch = scratch0();
      __ Load(scratch, FieldMemOperand(object, offset), representation);
      __ AssertSmi(scratch);
    }

    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
  __ Load(result, FieldMemOperand(object, offset), representation);
}
2858 
2859 
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  // Call the load IC; the result comes back in v0.
  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(
          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
          instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
2874 
2875 
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ ld(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ bind(&done);
}
2900 
2901 
DoLoadRoot(LLoadRoot * instr)2902 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2903   Register result = ToRegister(instr->result());
2904   __ LoadRoot(result, instr->index());
2905 }
2906 
2907 
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them add one more.
  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      // Both length and index constant: fully static offset.
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ ld(result, MemOperand(arguments, index * kPointerSize));
    } else {
      // Constant length, dynamic index: compute (length + 1 - index).
      Register index = ToRegister(instr->index());
      __ li(at, Operand(const_length + 1));
      __ Dsubu(result, at, index);
      __ dsll(at, result, kPointerSizeLog2);
      __ Daddu(at, arguments, at);
      __ ld(result, MemOperand(at));
    }
  } else if (instr->index()->IsConstantOperand()) {
    // Dynamic length, constant index: compute (length - (index - 1)).
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
    if (loc != 0) {
      __ Dsubu(result, length, Operand(loc));
      __ dsll(at, result, kPointerSizeLog2);
      __ Daddu(at, arguments, at);
      __ ld(result, MemOperand(at));
    } else {
      // index == 1: the word is at arguments + length * kPointerSize.
      __ dsll(at, length, kPointerSizeLog2);
      __ Daddu(at, arguments, at);
      __ ld(result, MemOperand(at));
    }
  } else {
    // Both dynamic: compute (length - index + 1).
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    __ Dsubu(result, length, index);
    __ Daddu(result, result, 1);
    __ dsll(at, result, kPointerSizeLog2);
    __ Daddu(at, arguments, at);
    __ ld(result, MemOperand(at));
  }
}
2951 
2952 
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // The addressing below cannot represent keys this large.
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  // A smi-represented key already carries the smi shift, so fold it into
  // the element-size shift; a negative shift_size means shift right.
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
      : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    // Compute the element address into scratch0().
    if (key_is_constant) {
      __ Daddu(scratch0(), external_pointer,
          constant_key << element_size_shift);
    } else {
      if (shift_size < 0) {
         if (shift_size == -32) {
           __ dsra32(scratch0(), key, 0);
         } else {
           __ dsra(scratch0(), key, -shift_size);
         }
      } else {
        __ dsll(scratch0(), key, shift_size);
      }
      __ Daddu(scratch0(), scratch0(), external_pointer);
    }
    if (elements_kind == FLOAT32_ELEMENTS) {
      // Widen the single-precision element to double.
      __ lwc1(result, MemOperand(scratch0(), base_offset));
      __ cvt_d_s(result, result);
    } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ ldc1(result, MemOperand(scratch0(), base_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size, base_offset);
    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case INT16_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case UINT16_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case INT32_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case UINT32_ELEMENTS:
        __ lw(result, mem_operand);
        // A uint32 with the top bit set cannot be represented unless the
        // instruction is explicitly marked as producing a uint32.
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
                       result, Operand(0x80000000));
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
3041 
3042 
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset = instr->base_offset();
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // The addressing below cannot represent keys this large.
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    // Fold a constant key straight into the base offset.
    base_offset += constant_key * kDoubleSize;
  }
  __ Daddu(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    // A smi key carries the smi shift; a negative shift_size means the key
    // must be shifted right instead of left.
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
        : element_size_shift;
    if (shift_size > 0) {
      __ dsll(at, key, shift_size);
    } else if (shift_size == -32) {
      __ dsra32(at, key, 0);
    } else {
      __ dsra(at, key, -shift_size);
    }
    __ Daddu(scratch, scratch, at);
  }

  __ ldc1(result, MemOperand(scratch));

  // The hole is stored as a NaN with kHoleNanUpper32 in its upper word;
  // deoptimize if we loaded it.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ FmoveHigh(scratch, result);
    DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
                 Operand(static_cast<int32_t>(kHoleNanUpper32)));
  }
}
3085 
3086 
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  if (instr->key()->IsConstantOperand()) {
    // Constant key: fold it into the offset and address off elements.
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
    __ SmiScale(scratch, key, kPointerSizeLog2);
    __ daddu(scratch, elements, scratch);
    } else {
      __ dsll(scratch, key, kPointerSizeLog2);
      __ daddu(scratch, elements, scratch);
    }
  }

  Representation representation = hinstr->representation();
  if (representation.IsInteger32() && SmiValuesAre32Bits() &&
      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
    DCHECK(!hinstr->RequiresHoleCheck());
    if (FLAG_debug_code) {
      Register temp = scratch1();
      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
      __ AssertSmi(temp);
    }

    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset = SmiWordOffset(offset);
  }

  __ Load(result, MemOperand(store_base, offset), representation);

  // Check for the hole value.
  if (hinstr->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      // In a fast-smi array any non-smi must be the hole.
      __ SmiTst(result, scratch);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
                   Operand(zero_reg));
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    __ Branch(&done, ne, result, Operand(scratch));
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
      // it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      // The comparison only needs LS bits of value, which is a smi.
      __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
      DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
                   Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ bind(&done);
  }
}
3161 
3162 
DoLoadKeyed(LLoadKeyed * instr)3163 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3164   if (instr->is_fixed_typed_array()) {
3165     DoLoadKeyedExternalArray(instr);
3166   } else if (instr->hydrogen()->representation().IsDouble()) {
3167     DoLoadKeyedFixedDoubleArray(instr);
3168   } else {
3169     DoLoadKeyedFixedArray(instr);
3170   }
3171 }
3172 
3173 
PrepareKeyedOperand(Register key,Register base,bool key_is_constant,int constant_key,int element_size,int shift_size,int base_offset)3174 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3175                                          Register base,
3176                                          bool key_is_constant,
3177                                          int constant_key,
3178                                          int element_size,
3179                                          int shift_size,
3180                                          int base_offset) {
3181   if (key_is_constant) {
3182     return MemOperand(base, (constant_key << element_size) + base_offset);
3183   }
3184 
3185   if (base_offset == 0) {
3186     if (shift_size >= 0) {
3187       __ dsll(scratch0(), key, shift_size);
3188       __ Daddu(scratch0(), base, scratch0());
3189       return MemOperand(scratch0());
3190     } else {
3191       if (shift_size == -32) {
3192         __ dsra32(scratch0(), key, 0);
3193       } else {
3194         __ dsra(scratch0(), key, -shift_size);
3195       }
3196       __ Daddu(scratch0(), base, scratch0());
3197       return MemOperand(scratch0());
3198     }
3199   }
3200 
3201   if (shift_size >= 0) {
3202     __ dsll(scratch0(), key, shift_size);
3203     __ Daddu(scratch0(), base, scratch0());
3204     return MemOperand(scratch0(), base_offset);
3205   } else {
3206     if (shift_size == -32) {
3207        __ dsra32(scratch0(), key, 0);
3208     } else {
3209       __ dsra(scratch0(), key, -shift_size);
3210     }
3211     __ Daddu(scratch0(), base, scratch0());
3212     return MemOperand(scratch0(), base_offset);
3213   }
3214 }
3215 
3216 
// Loads a keyed property through the generic KeyedLoadIC. Receiver and
// key must already sit in the registers mandated by LoadDescriptor.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  // Materialize the type feedback vector and slot when the IC collects
  // feedback.
  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
                        isolate(), instr->hydrogen()->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
3231 
3232 
// Computes the base address for accessing the function's arguments:
// either a fixed offset below sp (inlined frames) or the frame pointer
// of the frame that actually holds the arguments (the adaptor frame's
// caller frame if an arguments adaptor is present).
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // Inlined frame: arguments were pushed just below the current sp.
    __ Dsubu(result, sp, 2 * kPointerSize);
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    // temp == 0 iff the caller frame's context slot holds the
    // ARGUMENTS_ADAPTOR marker.
    Label done, adapted;
    __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
  }
}
3253 
3254 
// Computes the number of arguments passed to the current function: the
// static parameter count when no arguments adaptor frame exists, or the
// adaptor frame's length field otherwise.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  // elem holds fp in that case (see DoArgumentsElements).
  __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ld(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);  // The length field is stored as a smi.

  // Argument length is in result register.
  __ bind(&done);
}
3274 
3275 
// Implements sloppy-mode receiver wrapping: null/undefined receivers are
// replaced by the global proxy, non-object receivers deoptimize, and
// strict-mode functions or builtins receive the value unchanged.
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode functions.
    __ ld(scratch,
           FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));

    // Do not transform the receiver to object for builtins.
    int32_t strict_mode_function_mask =
        1 <<  SharedFunctionInfo::kStrictModeBitWithinByte;
    int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;

    // Test the strict-mode bit of the shared function info.
    __ lbu(at,
           FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
    __ And(at, at, Operand(strict_mode_function_mask));
    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
    // Test the native (builtin) bit.
    __ lbu(at,
           FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
    __ And(at, at, Operand(native_mask));
    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
  }

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver, scratch);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
               Operand(FIRST_JS_RECEIVER_TYPE));
  __ Branch(&result_in_receiver);

  // Fetch the global proxy from the function's native context.
  __ bind(&global_object);
  __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
  __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));

  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    // Jump over the receiver->result move when arriving from the
    // global-object path, where result is already set.
    Label result_ok;
    __ Branch(&result_ok);
    __ bind(&result_in_receiver);
    __ mov(result, receiver);
    __ bind(&result_ok);
  }
}
3337 
3338 
// Implements Function.prototype.apply: pushes up to kArgumentsLimit
// arguments from the elements base address onto the stack and invokes
// the function with a dynamic argument count.
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DCHECK(receiver.is(a0));  // Used for parameter count.
  DCHECK(function.is(a1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
               Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ Daddu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  // The dsll in the branch delay slot pre-computes the byte offset of
  // the last argument for the first loop iteration.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ dsll(scratch, length, kPointerSizeLog2);
  __ bind(&loop);
  __ Daddu(scratch, elements, scratch);
  __ ld(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Dsubu(length, length, Operand(1));
  // Recompute the next argument's offset in the delay slot.
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ dsll(scratch, length, kPointerSizeLog2);

  __ bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
                    safepoint_generator);
}
3387 
3388 
DoPushArgument(LPushArgument * instr)3389 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3390   LOperand* argument = instr->value();
3391   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3392     Abort(kDoPushArgumentNotImplementedForDoubleType);
3393   } else {
3394     Register argument_reg = EmitLoadRegister(argument, at);
3395     __ push(argument_reg);
3396   }
3397 }
3398 
3399 
DoDrop(LDrop * instr)3400 void LCodeGen::DoDrop(LDrop* instr) {
3401   __ Drop(instr->count());
3402 }
3403 
3404 
DoThisFunction(LThisFunction * instr)3405 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3406   Register result = ToRegister(instr->result());
3407   __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3408 }
3409 
3410 
DoContext(LContext * instr)3411 void LCodeGen::DoContext(LContext* instr) {
3412   // If there is a non-return use, the context must be moved to a register.
3413   Register result = ToRegister(instr->result());
3414   if (info()->IsOptimizing()) {
3415     __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3416   } else {
3417     // If there is no frame, the context must be in cp.
3418     DCHECK(result.is(cp));
3419   }
3420 }
3421 
3422 
DoDeclareGlobals(LDeclareGlobals * instr)3423 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3424   DCHECK(ToRegister(instr->context()).is(cp));
3425   __ li(scratch0(), instr->hydrogen()->pairs());
3426   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3427   __ Push(scratch0(), scratch1());
3428   CallRuntime(Runtime::kDeclareGlobals, instr);
3429 }
3430 
3431 
// Calls a statically-known JSFunction. When the formal parameter count
// matches the call arity (or the function opts out of adaption), the
// code entry is called directly; otherwise the generic InvokeFunction
// path handles arity adaption.
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = a1;  // Callers must have placed the function here.
  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // Change context.
    __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
    __ li(a0, Operand(arity));

    // Invoke function through its code entry.
    __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
    __ Call(at);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    // Arity mismatch: go through InvokeFunction / the arguments adaptor.
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
  }
}
3464 
3465 
// Deferred path of Math.abs for a tagged input: deoptimizes if the input
// is not a heap number, returns the input unchanged when non-negative,
// and otherwise allocates a fresh heap number with the sign bit cleared.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));

  Label done;
  // Reuse scratch0() for the exponent word; retire the scratch alias so it
  // cannot be used for anything else below.
  Register exponent = scratch0();
  scratch = no_reg;
  __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(a4) ? a0 : a4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(v0))
      __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    // Clear the sign bit and copy exponent and mantissa words into the new
    // heap number.
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    // Publish the new heap number as the instruction's result.
    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}
3532 
3533 
// Emits abs() for an untagged int32 value; deoptimizes when negating the
// minimum value overflows. The mov executes in the branch delay slot, so
// a non-negative input is copied to result whether or not the branch is
// taken.
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);  // Delay slot: result = input in all cases.
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
  __ bind(&done);
}
3546 
3547 
// Emits abs() for a smi value using 64-bit negation; deoptimizes when
// negating the minimum value overflows. Mirrors EmitIntegerMathAbs but
// operates on the full 64-bit (smi) register width.
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);  // Delay slot: result = input in all cases.
  __ dsubu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000 00000000.
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
  __ bind(&done);
}
3560 
3561 
DoMathAbs(LMathAbs * instr)3562 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3563   // Class for deferred case.
3564   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3565    public:
3566     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3567         : LDeferredCode(codegen), instr_(instr) { }
3568     void Generate() override {
3569       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3570     }
3571     LInstruction* instr() override { return instr_; }
3572 
3573    private:
3574     LMathAbs* instr_;
3575   };
3576 
3577   Representation r = instr->hydrogen()->value()->representation();
3578   if (r.IsDouble()) {
3579     FPURegister input = ToDoubleRegister(instr->value());
3580     FPURegister result = ToDoubleRegister(instr->result());
3581     __ abs_d(result, input);
3582   } else if (r.IsInteger32()) {
3583     EmitIntegerMathAbs(instr);
3584   } else if (r.IsSmi()) {
3585     EmitSmiMathAbs(instr);
3586   } else {
3587     // Representation is tagged.
3588     DeferredMathAbsTaggedHeapNumber* deferred =
3589         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3590     Register input = ToRegister(instr->value());
3591     // Smi check.
3592     __ JumpIfNotSmi(input, deferred->entry());
3593     // If smi, handle it directly.
3594     EmitSmiMathAbs(instr);
3595     __ bind(deferred->exit());
3596   }
3597 }
3598 
3599 
// Implements Math.floor for a double input with an int32 result.
// Deoptimizes on NaN/precision loss and, when the hydrogen instruction
// requires it, on a -0 result.
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  // Truncate toward minus infinity; except_flag becomes non-zero if the
  // conversion was inexact or invalid.
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch1,
                     double_scratch0(),
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0: a zero result with the input's sign bit set.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ mfhc1(scratch1, input);  // Get exponent/sign bits.
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                 Operand(zero_reg));
    __ bind(&done);
  }
}
3628 
3629 
// Implements Math.round as floor(input + 0.5) with an int32 result.
// Deoptimizes on out-of-range inputs, NaN/precision loss and, when
// required, on -0 results.
void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ mfhc1(result, input);
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Still need to distinguish +0 from -0 below.
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in ]0.5, 0[ and the result should be -0.
  __ mfhc1(result, double_scratch0());
  // mfhc1 sign-extends, clear the upper bits.
  __ dsll32(result, result, 0);
  __ dsrl32(result, result, 0);
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'
    // Negating it results in 'ge'
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  // Truncate (input + 0.5) toward minus infinity to finish the rounding.
  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     at,
                     double_scratch1,
                     except_flag);

  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0: zero result with the input's sign bit set.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfhc1(scratch, input);  // Get exponent/sign bits.
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
                 Operand(zero_reg));
  }
  __ bind(&done);
}
3708 
3709 
DoMathFround(LMathFround * instr)3710 void LCodeGen::DoMathFround(LMathFround* instr) {
3711   DoubleRegister input = ToDoubleRegister(instr->value());
3712   DoubleRegister result = ToDoubleRegister(instr->result());
3713   __ cvt_s_d(result, input);
3714   __ cvt_d_s(result, result);
3715 }
3716 
3717 
DoMathSqrt(LMathSqrt * instr)3718 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3719   DoubleRegister input = ToDoubleRegister(instr->value());
3720   DoubleRegister result = ToDoubleRegister(instr->result());
3721   __ sqrt_d(result, input);
3722 }
3723 
3724 
// Implements Math.pow(x, 0.5) as sqrt(x + 0). The -Infinity input is
// special-cased up front because per spec pow(-Infinity, 0.5) is
// +Infinity, whereas sqrt(-Infinity) would be NaN.
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  DCHECK(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, static_cast<double>(-V8_INFINITY));
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0 (sqrt(-0) would be -0 otherwise).
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}
3747 
3748 
DoPower(LPower * instr)3749 void LCodeGen::DoPower(LPower* instr) {
3750   Representation exponent_type = instr->hydrogen()->right()->representation();
3751   // Having marked this as a call, we can use any registers.
3752   // Just make sure that the input/output registers are the expected ones.
3753   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3754   DCHECK(!instr->right()->IsDoubleRegister() ||
3755          ToDoubleRegister(instr->right()).is(f4));
3756   DCHECK(!instr->right()->IsRegister() ||
3757          ToRegister(instr->right()).is(tagged_exponent));
3758   DCHECK(ToDoubleRegister(instr->left()).is(f2));
3759   DCHECK(ToDoubleRegister(instr->result()).is(f0));
3760 
3761   if (exponent_type.IsSmi()) {
3762     MathPowStub stub(isolate(), MathPowStub::TAGGED);
3763     __ CallStub(&stub);
3764   } else if (exponent_type.IsTagged()) {
3765     Label no_deopt;
3766     __ JumpIfSmi(tagged_exponent, &no_deopt);
3767     DCHECK(!a7.is(tagged_exponent));
3768     __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3769     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3770     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at));
3771     __ bind(&no_deopt);
3772     MathPowStub stub(isolate(), MathPowStub::TAGGED);
3773     __ CallStub(&stub);
3774   } else if (exponent_type.IsInteger32()) {
3775     MathPowStub stub(isolate(), MathPowStub::INTEGER);
3776     __ CallStub(&stub);
3777   } else {
3778     DCHECK(exponent_type.IsDouble());
3779     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3780     __ CallStub(&stub);
3781   }
3782 }
3783 
3784 
DoMathExp(LMathExp * instr)3785 void LCodeGen::DoMathExp(LMathExp* instr) {
3786   DoubleRegister input = ToDoubleRegister(instr->value());
3787   DoubleRegister result = ToDoubleRegister(instr->result());
3788   DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3789   DoubleRegister double_scratch2 = double_scratch0();
3790   Register temp1 = ToRegister(instr->temp1());
3791   Register temp2 = ToRegister(instr->temp2());
3792 
3793   MathExpGenerator::EmitMathExp(
3794       masm(), input, result, double_scratch1, double_scratch2,
3795       temp1, temp2, scratch0());
3796 }
3797 
3798 
// Implements Math.log by calling out to the C log() implementation.
void LCodeGen::DoMathLog(LMathLog* instr) {
  // 0 integer arguments, 1 double argument.
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
3806 
3807 
DoMathClz32(LMathClz32 * instr)3808 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3809   Register input = ToRegister(instr->value());
3810   Register result = ToRegister(instr->result());
3811   __ Clz(result, input);
3812 }
3813 
3814 
// Invokes the function in a1. A statically-known target goes through the
// faster CallKnownFunction path; otherwise the generic InvokeFunction
// sequence handles arity adaption.
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}
3832 
3833 
// Emits a call (or tail call) to a code object or a computed code entry.
// Tail calls tear down the frame first; regular calls record a safepoint
// via the SafepointGenerator after the call returns.
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(v0));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      // Skip past the code object header to the first instruction.
      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Jump(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      // Skip past the code object header to the first instruction.
      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(target);
    }
    // Records the safepoint for lazy deoptimization.
    generator.AfterCall();
  }
}
3869 
3870 
// Directly calls the JSFunction in a1 through its code entry, setting up
// context, new target and argument count as the callee expects.
void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  // Change context.
  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

  // Always initialize new target and number of actual arguments.
  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
  __ li(a0, Operand(instr->arity()));

  // Load the code entry address
  __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
  __ Call(at);

  // Register the call site for lazy deoptimization.
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
3888 
3889 
// Calls the function in a1 with instr->arity() arguments. When type
// feedback is collected, a CallIC is used with the feedback vector and
// slot in a2/a3; otherwise the generic Call builtin is invoked.
void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(a3));
    DCHECK(vector_register.is(a2));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    // Pass the feedback vector and slot index (as a smi) to the CallIC.
    __ li(vector_register, vector);
    __ li(slot_register, Operand(Smi::FromInt(index)));

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    // No feedback: call the generic Call builtin with the arity in a0.
    __ li(a0, Operand(arity));
    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
  }
}
3918 
3919 
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  // Array constructor call: dispatches on the argument count to one of the
  // specialized Array constructor stubs (no-arg / single-arg / n-arg).
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case we have a length argument.
    // The case may bail out to the runtime, which will determine the correct
    // elements kind with the site.
    __ li(a2, instr->hydrogen()->site());
  } else {
    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  }
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here,
      // look at the first argument.
      __ ld(a5, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, a5, Operand(zero_reg));

      // Non-zero length argument: the array starts out holey, so use the
      // holey variant of the elements kind.
      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
3969 
3970 
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  // Thin wrapper: emit a call to the given runtime function with the
  // instruction's argument count.
  CallRuntime(instr->function(), instr->arity(), instr);
}
3974 
3975 
DoStoreCodeEntry(LStoreCodeEntry * instr)3976 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3977   Register function = ToRegister(instr->function());
3978   Register code_object = ToRegister(instr->code_object());
3979   __ Daddu(code_object, code_object,
3980           Operand(Code::kHeaderSize - kHeapObjectTag));
3981   __ sd(code_object,
3982         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
3983 }
3984 
3985 
DoInnerAllocatedObject(LInnerAllocatedObject * instr)3986 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3987   Register result = ToRegister(instr->result());
3988   Register base = ToRegister(instr->base_object());
3989   if (instr->offset()->IsConstantOperand()) {
3990     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3991     __ Daddu(result, base, Operand(ToInteger32(offset)));
3992   } else {
3993     Register offset = ToRegister(instr->offset());
3994     __ Daddu(result, base, offset);
3995   }
3996 }
3997 
3998 
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  // Stores a value into a named field (in-object or in the out-of-object
  // properties array), optionally installing a transition map, and emits
  // write barriers where needed.
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  // NOTE: these locals intentionally shadow the member scratch accessors —
  // local scratch2 is scratch1() and local scratch1 is scratch0().
  Register scratch2 = scratch1();
  Register scratch1 = scratch0();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  if (access.IsExternalMemory()) {
    // Raw store to external memory; no smi check or write barrier.
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    // Boxed double field: write straight into the HeapNumber payload.
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    // Install the transition map before performing the store.
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ li(scratch1, Operand(transition));
    __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch1,
                           temp,
                           GetRAState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination = object;
  if (!access.IsInobject()) {
    // Out-of-object property: redirect the store to the properties array.
       destination = scratch1;
    __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
  }

  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    if (FLAG_debug_code) {
      __ Load(scratch2, FieldMemOperand(destination, offset), representation);
      __ AssertSmi(scratch2);
    }
    // Store int value directly to upper half of the smi.
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
  MemOperand operand = FieldMemOperand(destination, offset);

  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, operand);
  } else {
    DCHECK(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Update the write barrier for the object for in-object properties.
    Register value = ToRegister(instr->value());
    __ RecordWriteField(destination,
                        offset,
                        value,
                        scratch2,
                        GetRAState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}
4089 
4090 
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  // Generic named store: delegates to the StoreIC using the fixed register
  // layout required by StoreDescriptor.
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    // Populate the feedback vector/slot registers for the vector-based IC.
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }

  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
4106 
4107 
DoBoundsCheck(LBoundsCheck * instr)4108 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4109   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
4110   Operand operand((int64_t)0);
4111   Register reg;
4112   if (instr->index()->IsConstantOperand()) {
4113     operand = ToOperand(instr->index());
4114     reg = ToRegister(instr->length());
4115     cc = CommuteCondition(cc);
4116   } else {
4117     reg = ToRegister(instr->index());
4118     operand = ToOperand(instr->length());
4119   }
4120   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4121     Label done;
4122     __ Branch(&done, NegateCondition(cc), reg, operand);
4123     __ stop("eliminated bounds check failed");
4124     __ bind(&done);
4125   } else {
4126     DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
4127   }
4128 }
4129 
4130 
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  // Stores into a typed-array backing store: computes the element address
  // from the constant-or-register key, then emits a store sized by the
  // elements kind.
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  // A negative shift size means the key is smi-tagged and must be scaled
  // down instead of up.
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
      : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Daddu(address, external_pointer,
                Operand(constant_key << element_size_shift));
      } else {
        // Zero index: address the backing store directly.
        address = external_pointer;
      }
    } else {
      if (shift_size < 0) {
        if (shift_size == -32) {
          __ dsra32(address, key, 0);
        } else {
          __ dsra(address, key, -shift_size);
        }
      } else {
        __ dsll(address, key, shift_size);
      }
      __ Daddu(address, external_pointer, address);
    }

    if (elements_kind == FLOAT32_ELEMENTS) {
      // Narrow the double to single precision before storing.
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ sdc1(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        base_offset);
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ sb(value, mem_operand);
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ sh(value, mem_operand);
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ sw(value, mem_operand);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        // Non-typed-array kinds are handled by the other StoreKeyed paths.
        UNREACHABLE();
        break;
    }
  }
}
4216 
4217 
DoStoreKeyedFixedDoubleArray(LStoreKeyed * instr)4218 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4219   DoubleRegister value = ToDoubleRegister(instr->value());
4220   Register elements = ToRegister(instr->elements());
4221   Register scratch = scratch0();
4222   DoubleRegister double_scratch = double_scratch0();
4223   bool key_is_constant = instr->key()->IsConstantOperand();
4224   int base_offset = instr->base_offset();
4225   Label not_nan, done;
4226 
4227   // Calculate the effective address of the slot in the array to store the
4228   // double value.
4229   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4230   if (key_is_constant) {
4231     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4232     if (constant_key & 0xF0000000) {
4233       Abort(kArrayIndexConstantValueTooBig);
4234     }
4235     __ Daddu(scratch, elements,
4236              Operand((constant_key << element_size_shift) + base_offset));
4237   } else {
4238     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4239         ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
4240         : element_size_shift;
4241     __ Daddu(scratch, elements, Operand(base_offset));
4242     DCHECK((shift_size == 3) || (shift_size == -29));
4243     if (shift_size == 3) {
4244       __ dsll(at, ToRegister(instr->key()), 3);
4245     } else if (shift_size == -29) {
4246       __ dsra(at, ToRegister(instr->key()), 29);
4247     }
4248     __ Daddu(scratch, scratch, at);
4249   }
4250 
4251   if (instr->NeedsCanonicalization()) {
4252     __ FPUCanonicalizeNaN(double_scratch, value);
4253     __ sdc1(double_scratch, MemOperand(scratch, 0));
4254   } else {
4255     __ sdc1(value, MemOperand(scratch, 0));
4256   }
4257 }
4258 
4259 
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  // Stores a tagged (or int32-as-smi) value into a FixedArray backing store
  // and emits a write barrier afterwards when required.
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ SmiScale(scratch, key, kPointerSizeLog2);
      __ daddu(store_base, elements, scratch);
    } else {
      __ dsll(scratch, key, kPointerSizeLog2);
      __ daddu(store_base, elements, scratch);
    }
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
    if (FLAG_debug_code) {
      Register temp = scratch1();
      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
      __ AssertSmi(temp);
    }

    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }

  __ Store(value, MemOperand(store_base, offset), representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    // Note: this clobbers the key register; it is not needed afterwards.
    __ Daddu(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}
4324 
4325 
DoStoreKeyed(LStoreKeyed * instr)4326 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4327   // By cases: external, fast double
4328   if (instr->is_fixed_typed_array()) {
4329     DoStoreKeyedExternalArray(instr);
4330   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4331     DoStoreKeyedFixedDoubleArray(instr);
4332   } else {
4333     DoStoreKeyedFixedArray(instr);
4334   }
4335 }
4336 
4337 
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  // Generic keyed store: delegates to the KeyedStoreIC using the fixed
  // register layout required by StoreDescriptor.
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    // Populate the feedback vector/slot registers for the vector-based IC.
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
4353 
4354 
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  // If the key is at or beyond the current capacity, grow the elements
  // backing store on the deferred (out-of-line) path; otherwise forward the
  // existing elements pointer in v0.
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = v0;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  // Compare key against capacity. Each operand may be a constant or a
  // register, giving four comparison shapes; the constant/constant case is
  // decided entirely at compile time.
  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Branch(deferred->entry(), le, ToRegister(current_capacity),
              Operand(constant_key));
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(constant_capacity));
  } else {
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(ToRegister(current_capacity)));
  }

  // Fast path: no growth needed; return the current elements pointer.
  if (instr->elements()->IsRegister()) {
    __ mov(result, ToRegister(instr->elements()));
  } else {
    __ ld(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}
4409 
4410 
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // Deferred slow path for DoMaybeGrowElements: calls GrowArrayElementsStub
  // to grow the backing store, deopting if growth failed.
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = v0;
  __ mov(result, zero_reg);

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ mov(result, ToRegister(instr->object()));
    } else {
      __ ld(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
    } else {
      // Smi-tag the key for the stub.
      __ mov(a3, ToRegister(key));
      __ SmiTag(a3);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ mov(a0, result);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ SmiTst(result, at);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
}
4448 
4449 
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  // Transitions the object's elements kind. A "simple" transition only swaps
  // the map; other transitions call TransitionElementsKindStub.
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  // Only transition objects that still have the expected source map.
  __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetRAState(),
                         kDontSaveFPRegs);
  } else {
    // Stub path: object must already be in a0, new map in a1.
    DCHECK(object_reg.is(a0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ li(a1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}
4486 
4487 
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  // Deoptimize unconditionally when an AllocationMemento is found trailing
  // the object; otherwise fall through.
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  // NOTE(review): the trailing (ne, &no_memento_found) arguments look
  // redundant with the third argument — confirm against the
  // TestJSArrayForAllocationMemento signature in macro-assembler-mips64.h.
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
                                     ne, &no_memento_found);
  DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}
4497 
4498 
void LCodeGen::DoStringAdd(LStringAdd* instr) {
  // String concatenation via StringAddStub; left operand in a1, right in a0.
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
4508 
4509 
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  // Loads the character code at string[index]. The common string shapes are
  // handled inline by StringCharLoadGenerator; anything else jumps to the
  // deferred runtime path.
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}
4531 
4532 
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  // Slow path: calls %StringCharCodeAtRT(string, smi index) and untags the
  // smi result into the result register.
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(v0);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}
4562 
4563 
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  // Maps a character code to a one-character string. One-byte codes are
  // looked up in the single-character string cache; wider codes and cache
  // misses take the deferred runtime path.
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  DCHECK(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ dsll(scratch, char_code, kPointerSizeLog2);
  __ Daddu(result, result, scratch);
  __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  // An undefined cache entry means the string was not created yet; take the
  // deferred path to build it.
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}
4597 
4598 
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  // Slow path: calls %StringCharFromCode with the smi-tagged char code.
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}
4615 
4616 
DoInteger32ToDouble(LInteger32ToDouble * instr)4617 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4618   LOperand* input = instr->value();
4619   DCHECK(input->IsRegister() || input->IsStackSlot());
4620   LOperand* output = instr->result();
4621   DCHECK(output->IsDoubleRegister());
4622   FPURegister single_scratch = double_scratch0().low();
4623   if (input->IsStackSlot()) {
4624     Register scratch = scratch0();
4625     __ ld(scratch, ToMemOperand(input));
4626     __ mtc1(scratch, single_scratch);
4627   } else {
4628     __ mtc1(ToRegister(input), single_scratch);
4629   }
4630   __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4631 }
4632 
4633 
DoUint32ToDouble(LUint32ToDouble * instr)4634 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4635   LOperand* input = instr->value();
4636   LOperand* output = instr->result();
4637 
4638   FPURegister dbl_scratch = double_scratch0();
4639   __ mtc1(ToRegister(input), dbl_scratch);
4640   __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
4641 }
4642 
4643 
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  // Tags a uint32 as a JS number: values that fit in a smi are tagged
  // inline; larger values allocate a HeapNumber on the deferred path.
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  // Unsigned comparison: values above Smi::kMaxValue need a HeapNumber.
  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}
4670 
4671 
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  // Deferred path for NumberTagI/NumberTagU: boxes the integer in |value|
  // into a newly allocated HeapNumber. Inline allocation is attempted
  // first; if it fails (or inlining is disabled) the runtime allocator is
  // called. The double payload is stored into the result at the end.
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      // The register holds the overflowed tagged value; untag and flip the
      // sign bit to recover the original int32.
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    // Unsigned case: convert the 32-bit value as a uint32.
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch);
  }

  if (FLAG_inline_new) {
    // Fast path: inline HeapNumber allocation; jumps to |slow| on failure.
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, zero_reg);
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}
4733 
4734 
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  // Boxes a double value into a newly allocated HeapNumber. Allocation is
  // done inline when possible; the deferred code calls into the runtime.
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  // Store the double payload via the untagged address. (The deferred path
  // untags v0 before storing it back, so both paths arrive here untagged.)
  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address tag it
  __ Daddu(reg, reg, kHeapObjectTag);
}
4767 
4768 
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // Deferred path for NumberTagD: allocates the HeapNumber through the
  // runtime and leaves its *untagged* address in the result register,
  // matching what AllocateHeapNumber(..., DONT_TAG_RESULT) produces.
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  // The runtime returned a tagged pointer; strip the tag so the caller can
  // store the double payload before re-tagging the result.
  __ Dsubu(v0, v0, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(v0, reg);
}
4789 
4790 
DoSmiTag(LSmiTag * instr)4791 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4792   HChange* hchange = instr->hydrogen();
4793   Register input = ToRegister(instr->value());
4794   Register output = ToRegister(instr->result());
4795   if (hchange->CheckFlag(HValue::kCanOverflow) &&
4796       hchange->value()->CheckFlag(HValue::kUint32)) {
4797     __ And(at, input, Operand(0x80000000));
4798     DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
4799   }
4800   if (hchange->CheckFlag(HValue::kCanOverflow) &&
4801       !hchange->value()->CheckFlag(HValue::kUint32)) {
4802     __ SmiTagCheckOverflow(output, input, at);
4803     DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
4804   } else {
4805     __ SmiTag(output, input);
4806   }
4807 }
4808 
4809 
DoSmiUntag(LSmiUntag * instr)4810 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4811   Register scratch = scratch0();
4812   Register input = ToRegister(instr->value());
4813   Register result = ToRegister(instr->result());
4814   if (instr->needs_check()) {
4815     STATIC_ASSERT(kHeapObjectTag == 1);
4816     // If the input is a HeapObject, value of scratch won't be zero.
4817     __ And(scratch, input, Operand(kHeapObjectTag));
4818     __ SmiUntag(result, input);
4819     DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
4820   } else {
4821     __ SmiUntag(result, input);
4822   }
4823 }
4824 
4825 
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  // Converts the tagged value in input_reg to a double in result_reg.
  // Handles Smis, HeapNumbers and (optionally) undefined -> NaN; any other
  // input deoptimizes.
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      // Non-HeapNumbers get a second chance as undefined below.
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
                   Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      // -0.0 has a zero low word and only the sign bit set in the high
      // word; deoptimize exactly in that case.
      __ mfc1(at, result_reg);
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
                   Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
                   Operand(at));
      // Load the canonical NaN payload from the NaN root object.
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}
4878 
4879 
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  // Deferred path for TaggedToI: the input is known to be a HeapObject.
  // Truncating conversions accept HeapNumbers, undefined and booleans;
  // non-truncating conversions accept only HeapNumbers whose value fits an
  // int32 exactly. The result is written back into input_reg (input and
  // result share a register, see DoTaggedToI).
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    // Check HeapNumber map.
    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
    __ mov(scratch2, input_reg);  // In delay slot.
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ Branch(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&check_bools, ne, input_reg, Operand(at));
    DCHECK(ToRegister(instr->result()).is(input_reg));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.

    __ bind(&check_bools);
    // scratch2 still holds the original input value (copied in the delay
    // slot above).
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(&check_false, ne, scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ li(input_reg, Operand(1));  // In delay slot.

    __ bind(&check_false);
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
                 scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
                 Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    // Truncate toward zero; except_flag is set on inexact conversion/NaN.
    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero result could have come from -0.0; check the sign bit of the
      // original double and deoptimize in that case.
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
    }
  }
  __ bind(&done);
}
4961 
4962 
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  // Converts a tagged value to int32 in place (input register == result
  // register). Smis are untagged inline; HeapObjects go through deferred
  // code (DoDeferredTaggedToI).
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    // Statically known to be a Smi: no check needed, just untag.
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}
4994 
4995 
DoNumberUntagD(LNumberUntagD * instr)4996 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4997   LOperand* input = instr->value();
4998   DCHECK(input->IsRegister());
4999   LOperand* result = instr->result();
5000   DCHECK(result->IsDoubleRegister());
5001 
5002   Register input_reg = ToRegister(input);
5003   DoubleRegister result_reg = ToDoubleRegister(result);
5004 
5005   HValue* value = instr->hydrogen()->value();
5006   NumberUntagDMode mode = value->representation().IsSmi()
5007       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5008 
5009   EmitNumberUntagD(instr, input_reg, result_reg, mode);
5010 }
5011 
5012 
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  // Converts a double to int32. Truncating conversions always succeed;
  // otherwise deoptimize on precision loss, NaN, or (optionally) -0.0.
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    // Round toward minus infinity, flagging inexact conversions.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero result may stem from -0.0; inspect the sign bit and deopt.
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}
5046 
5047 
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  // Converts a double to a Smi: same conversion logic as DoDoubleToI,
  // followed by tagging the int32 result.
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = LCodeGen::scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    // Round toward minus infinity, flagging inexact conversions.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero result may stem from -0.0; inspect the sign bit and deopt.
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, result_reg);
}
5082 
5083 
DoCheckSmi(LCheckSmi * instr)5084 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5085   LOperand* input = instr->value();
5086   __ SmiTst(ToRegister(input), at);
5087   DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
5088 }
5089 
5090 
DoCheckNonSmi(LCheckNonSmi * instr)5091 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5092   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5093     LOperand* input = instr->value();
5094     __ SmiTst(ToRegister(input), at);
5095     DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
5096   }
5097 }
5098 
5099 
DoCheckArrayBufferNotNeutered(LCheckArrayBufferNotNeutered * instr)5100 void LCodeGen::DoCheckArrayBufferNotNeutered(
5101     LCheckArrayBufferNotNeutered* instr) {
5102   Register view = ToRegister(instr->view());
5103   Register scratch = scratch0();
5104 
5105   __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
5106   __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
5107   __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
5108   DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
5109 }
5110 
5111 
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  // Deoptimizes unless the input object's instance type satisfies either an
  // interval check [first, last] or a mask/tag check (type & mask == tag).
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  // scratch <- instance type of the object's map.
  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
    } else {
      // Deopt below the lower bound...
      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        // ...and above the upper bound (unless it is the largest type).
        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
                     Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      // Single-bit mask: a plain bit test suffices. tag == 0 means the bit
      // must be clear; otherwise it must be set.
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(tag));
    }
  }
}
5153 
5154 
DoCheckValue(LCheckValue * instr)5155 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5156   Register reg = ToRegister(instr->value());
5157   Handle<HeapObject> object = instr->hydrogen()->object().handle();
5158   AllowDeferredHandleDereference smi_check;
5159   if (isolate()->heap()->InNewSpace(*object)) {
5160     Register reg = ToRegister(instr->value());
5161     Handle<Cell> cell = isolate()->factory()->NewCell(object);
5162     __ li(at, Operand(cell));
5163     __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
5164     DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
5165   } else {
5166     DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
5167   }
5168 }
5169 
5170 
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  // Deferred path for CheckMaps with a migration target: asks the runtime
  // to migrate the instance, then deoptimizes when the call's result is a
  // Smi (treated here as migration failure).
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    // Clear cp: no context is passed to this runtime call.
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  // Deopt if the runtime returned a Smi.
  __ SmiTst(scratch0(), at);
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
               Operand(zero_reg));
}
5185 
5186 
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  // Deoptimizes unless the object's map is one of the expected maps. For
  // stability checks no code is emitted; only compilation dependencies are
  // registered. When a migration target exists, a failing check first tries
  // instance migration via deferred code before deoptimizing.
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      // Exit back to the map checks so they re-run after migration.
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    // No runtime check: depend on the maps staying stable instead.
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);
  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  // Compare against all but the last map; any match jumps to success.
  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
  }

  __ bind(&success);
}
5242 
5243 
DoClampDToUint8(LClampDToUint8 * instr)5244 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5245   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5246   Register result_reg = ToRegister(instr->result());
5247   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5248   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5249 }
5250 
5251 
DoClampIToUint8(LClampIToUint8 * instr)5252 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5253   Register unclamped_reg = ToRegister(instr->unclamped());
5254   Register result_reg = ToRegister(instr->result());
5255   __ ClampUint8(result_reg, unclamped_reg);
5256 }
5257 
5258 
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  // Clamps a tagged value into the uint8 range [0, 255]. Handles Smis,
  // HeapNumbers and undefined (which clamps to 0); anything else deopts.
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number
  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  // Smi case: scratch holds the untagged value (set by UntagAndJumpIfSmi).
  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}
5292 
5293 
DoDoubleBits(LDoubleBits * instr)5294 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5295   DoubleRegister value_reg = ToDoubleRegister(instr->value());
5296   Register result_reg = ToRegister(instr->result());
5297   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5298     __ FmoveHigh(result_reg, value_reg);
5299   } else {
5300     __ FmoveLow(result_reg, value_reg);
5301   }
5302 }
5303 
5304 
DoConstructDouble(LConstructDouble * instr)5305 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5306   Register hi_reg = ToRegister(instr->hi());
5307   Register lo_reg = ToRegister(instr->lo());
5308   DoubleRegister result_reg = ToDoubleRegister(instr->result());
5309   __ Move(result_reg, lo_reg, hi_reg);
5310 }
5311 
5312 
void LCodeGen::DoAllocate(LAllocate* instr) {
  // Inline-allocates an object of the requested (constant or dynamic) size,
  // falling back to a runtime allocation in deferred code. Optionally
  // pre-fills the new object with one-pointer filler maps.
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // scratch <- size of the object minus the tag adjustment.
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    // Store the filler map into every pointer-sized slot, walking backwards
    // from the end of the object (result + scratch) down to offset zero.
    __ bind(&loop);
    __ Dsubu(scratch, scratch, Operand(kPointerSize));
    __ Daddu(at, result, Operand(scratch));
    __ sd(scratch2, MemOperand(at));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
  }
}
5369 
5370 
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // Deferred path for Allocate: calls Runtime::kAllocateInTargetSpace with
  // the Smi-tagged size and the allocation flags pushed on the stack.
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ li(v0, Operand(Smi::FromInt(size)));
      __ Push(v0);
    } else {
      // We should never get here at runtime => abort
      __ stop("invalid allocation size");
      return;
    }
  }

  // Encode double-alignment and target-space into the flags argument.
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ li(v0, Operand(Smi::FromInt(flags)));
  __ Push(v0);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}
5412 
5413 
DoToFastProperties(LToFastProperties * instr)5414 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5415   DCHECK(ToRegister(instr->value()).is(a0));
5416   DCHECK(ToRegister(instr->result()).is(v0));
5417   __ push(a0);
5418   CallRuntime(Runtime::kToFastProperties, 1, instr);
5419 }
5420 
5421 
DoTypeof(LTypeof * instr)5422 void LCodeGen::DoTypeof(LTypeof* instr) {
5423   DCHECK(ToRegister(instr->value()).is(a3));
5424   DCHECK(ToRegister(instr->result()).is(v0));
5425   Label end, do_call;
5426   Register value_register = ToRegister(instr->value());
5427   __ JumpIfNotSmi(value_register, &do_call);
5428   __ li(v0, Operand(isolate()->factory()->number_string()));
5429   __ jmp(&end);
5430   __ bind(&do_call);
5431   TypeofStub stub(isolate());
5432   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5433   __ bind(&end);
5434 }
5435 
5436 
DoTypeofIsAndBranch(LTypeofIsAndBranch * instr)5437 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5438   Register input = ToRegister(instr->value());
5439 
5440   Register cmp1 = no_reg;
5441   Operand cmp2 = Operand(no_reg);
5442 
5443   Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5444                                                   instr->FalseLabel(chunk_),
5445                                                   input,
5446                                                   instr->type_literal(),
5447                                                   &cmp1,
5448                                                   &cmp2);
5449 
5450   DCHECK(cmp1.is_valid());
5451   DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
5452 
5453   if (final_branch_condition != kNoCondition) {
5454     EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5455   }
5456 }
5457 
5458 
// Emits the comparison code for `typeof input == type_name`.  For some type
// literals the branch is taken directly to true_label/false_label; otherwise
// the caller receives a condition plus operands (*cmp1 <cond> *cmp2) for the
// final branch.  Returns kNoCondition when the branch was fully resolved
// here (the unknown-literal case).
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register* cmp1,
                                 Operand* cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    // "number": smis are numbers; otherwise the map must be the heap-number
    // map.  Note: `input` is clobbered with its map here.
    __ JumpIfSmi(input, true_label);
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    *cmp1 = input;
    *cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    // "string": non-smi with instance type below FIRST_NONSTRING_TYPE.
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    // "symbol": non-smi with instance type exactly SYMBOL_TYPE.
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    // "boolean": identical to either the true or the false oddball.  The
    // LoadRoot of the false value sits in the branch delay slot.
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    // "undefined": the undefined oddball, or an undetectable object.
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    // "function": callable and NOT undetectable.  Masking with both bits and
    // comparing against just kIsCallable enforces both conditions at once.
    __ JumpIfSmi(input, false_label);
    __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = scratch;
    *cmp2 = Operand(1 << Map::kIsCallable);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    // "object": null, or a JSReceiver that is neither callable nor
    // undetectable.
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ GetObjectType(input, scratch, scratch1());
    __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
    // Check for callable or undetectable objects => false.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(at, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  // Each SIMD128 type string ("float32x4" etc.) matches by comparing the
  // object's map against the corresponding root map.  The macro expands into
  // further `} else if` arms of this chain.
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
  } else if (String::Equals(type_name, factory->type##_string())) {  \
    __ JumpIfSmi(input, false_label);                                \
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
    *cmp1 = input;                                                   \
    *cmp2 = Operand(at);                                             \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on


  } else {
    // Unknown type literal: the answer is always false.
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}
5562 
5563 
EnsureSpaceForLazyDeopt(int space_needed)5564 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5565   if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5566     // Ensure that we have enough space after the previous lazy-bailout
5567     // instruction for patching the code here.
5568     int current_pc = masm()->pc_offset();
5569     if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5570       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5571       DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5572       while (padding_size > 0) {
5573         __ nop();
5574         padding_size -= Assembler::kInstrSize;
5575       }
5576     }
5577   }
5578   last_lazy_deopt_pc_ = masm()->pc_offset();
5579 }
5580 
5581 
// Records a lazy-deoptimization point: remembers the current pc as the most
// recent lazy-deopt site and registers the environment so the safepoint
// emitted for the preceding call can be associated with it.
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
5589 
5590 
DoDeoptimize(LDeoptimize * instr)5591 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5592   Deoptimizer::BailoutType type = instr->hydrogen()->type();
5593   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5594   // needed return address), even though the implementation of LAZY and EAGER is
5595   // now identical. When LAZY is eventually completely folded into EAGER, remove
5596   // the special case below.
5597   if (info()->IsStub() && type == Deoptimizer::EAGER) {
5598     type = Deoptimizer::LAZY;
5599   }
5600 
5601   DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
5602                Operand(zero_reg));
5603 }
5604 
5605 
// Placeholder instruction: intentionally emits no code.
void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}
5609 
5610 
// Placeholder use of a value: intentionally emits no code.
void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}
5614 
5615 
// Deferred (slow-path) part of a stack check: calls the StackGuard runtime
// function with all registers saved, then records the safepoint and the
// lazy-deopt index for the surrounding stack-check instruction.
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
5626 
5627 
// Emits a stack-overflow check.  At function entry the slow path calls the
// StackCheck builtin inline; on backward branches the slow path is emitted
// as deferred code so the fast path stays a single compare-and-branch.
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  // Deferred wrapper that routes the slow path to DoDeferredStackCheck.
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    // sp >= stack limit (unsigned) => no overflow, skip the builtin call.
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    // sp below the limit (unsigned) => take the deferred slow path.
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}
5671 
5672 
// On-stack-replacement entry point.
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}
5686 
5687 
// Prepares a for-in loop: deoptimizes unless the object is a proper
// JavaScript object, then either uses the map's enum cache or falls back
// to the runtime to compute the set of enumerable property names.
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());

  // Smis cannot be enumerated this way => deopt.
  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));

  // Deopt for proxies and anything below the JS-receiver range.
  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
               Operand(JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  Register null_value = a5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  // Fast path: the whole prototype chain has valid enum caches.
  __ CheckEnumCache(null_value, &call_runtime);

  // Result is the object's map; the cache itself is read via the map later.
  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, instr);

  // The runtime may return either a map or a fixed array of names; deopt
  // unless it returned a map (whose map is the meta map).
  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  DCHECK(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
  __ bind(&use_cache);
}
5720 
5721 
// Loads the enum cache array for a for-in loop from the given map.  An enum
// length of zero yields the empty fixed array; otherwise the cache is read
// from the map's instance descriptors, deopting if no cache is present.
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  // No enumerable properties: use the canonical empty fixed array.
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ld(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ld(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  // A null/zero cache entry means there is no enum cache => deopt.
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));

  __ bind(&done);
}
5741 
5742 
// Deoptimizes if the object's map differs from the expected map register
// (used to detect map transitions during for-in iteration).
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
}
5749 
5750 
// Deferred slow path for LLoadFieldByIndex when the field holds a mutable
// heap number: calls the runtime to materialize the double, with all
// registers saved, and stores the result into `result`'s safepoint slot.
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  // Clear cp: this runtime call is made without a JavaScript context.
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
     instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}
5763 
5764 
// Loads a property by its field index.  The (smi) index encodes a flag in
// its lowest payload bit: when set, the field is a mutable double and the
// deferred path materializes it via the runtime.  Otherwise the sign of the
// remaining index selects in-object vs. out-of-object (properties backing
// store) access.
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  // Deferred wrapper that routes the mutable-double case to
  // DoDeferredLoadMutableDouble.
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  // Low payload bit set => mutable double; take the deferred slow path.
  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  // Shift out the flag bit; the value stays a smi.
  __ dsra(index, index, 1);

  // Negative index => out-of-object property; the scaled index is computed
  // in the branch delay slot so it is available on both paths.
  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
  __ Daddu(scratch, object, scratch);
  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Dsubu(scratch, result, scratch);
  __ ld(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}
5822 
5823 
DoStoreFrameContext(LStoreFrameContext * instr)5824 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5825   Register context = ToRegister(instr->context());
5826   __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5827 }
5828 
5829 
// Allocates a new block context by pushing the scope info and closure and
// calling the PushBlockContext runtime function; records a safepoint for
// the call.
void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ li(at, scope_info);
  __ Push(at, ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}
5837 
5838 
5839 #undef __
5840 
5841 }  // namespace internal
5842 }  // namespace v8
5843