// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


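// A CallWrapper that records a safepoint for the given pointer map right
// after the wrapped call returns. Instances are handed to the
// MacroAssembler's call helpers so that the safepoint lands at the call's
// return address.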
class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


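// Shorthand used throughout this file: '__' expands to the code generator's
// MacroAssembler, so '__ B(cond, label)' emits code via
// masm()->B(cond, label).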
#define __ masm()->

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags and they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen),
        cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};


// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
      : BranchGenerator(codegen),
        cond_(cond),
        lhs_(lhs),
        rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};


// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
      : BranchGenerator(codegen),
        cond_(cond),
        value_(value),
        mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};


// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
      : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};


// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};


// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};


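// Write |environment| and its chain of outer environments to |translation|.
// The recursion emits outer frames first, so the translation is ordered from
// the outermost frame inwards.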
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);

    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


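// Return the index of |literal| in the deoptimization literal array, adding
// it first if it is not already present.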
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);

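  // Block pool emission so that nothing can be emitted between the call and
  // the safepoint recorded for it; the safepoint must correspond to the
  // call's return address.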
  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).Is(x1));
  DCHECK(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  after_push_argument_ = false;
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  after_push_argument_ = false;

  DCHECK(ToRegister(instr->result()).is(x0));
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
  after_push_argument_ = false;

  DCHECK(ToRegister(instr->result()).is(x0));
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


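// Record a safepoint at the current pc. Every pointer-holding stack slot in
// |pointers| is registered with the safepoint; for register safepoints the
// pointer-holding registers are registered too, and cp is always included.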
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }

  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp, zone());
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t FLAG in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Poke(x10, receiver_offset);

      __ Bind(&ok);
    }
  }

  DCHECK(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ Claim(slots, kPointerSize);
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }

  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Claim(slots);
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp, cp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame, restore_caller_doubles, call_deopt_entry;

  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ Bind(&table_entry->label);

      Address entry = table_entry->address;
      DeoptComment(table_entry->reason);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      __ Mov(entry_offset, entry - base);

      // The last entry can fall through into `call_deopt_entry`, avoiding a
      // branch.
      bool last_entry = (i + 1) == length;

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        if (!needs_frame.is_bound()) {
          // This variant of deopt can only be used with stubs. Since we don't
          // have a function pointer to install in the stack frame that we're
          // building, install a special marker there instead.
          DCHECK(info()->IsStub());

          UseScratchRegisterScope temps(masm());
          Register stub_marker = temps.AcquireX();
          __ Bind(&needs_frame);
          __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
          __ Push(lr, fp, cp, stub_marker);
          __ Add(fp, __ StackPointer(), 2 * kPointerSize);
          if (!last_entry) __ B(&call_deopt_entry);
        } else {
          // Reuse the existing needs_frame code.
          __ B(&needs_frame);
        }
      } else if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        if (!restore_caller_doubles.is_bound()) {
          __ Bind(&restore_caller_doubles);
          RestoreCallerDoubles();
          if (!last_entry) __ B(&call_deopt_entry);
        } else {
          // Reuse the existing restore_caller_doubles code.
          __ B(&restore_caller_doubles);
        }
      } else {
        // There is nothing special to do, so just continue to the second-level
        // table.
        if (!last_entry) __ B(&call_deopt_entry);
      }

      masm()->CheckConstPool(false, last_entry);
    }

    // Generate common code for calling the second-level deopt table.
    Register deopt_entry = temps.AcquireX();
    __ Bind(&call_deopt_entry);
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Call(deopt_entry);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


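// Emit a deoptimization check. |branch_type| (together with |reg| and |bit|
// where applicable) describes the condition under which we bail out:
// 'always' deoptimizes unconditionally, while conditional forms either call
// the deopt entry directly or branch to a shared jump table entry.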
void LCodeGen::DeoptimizeBranch(
    LInstruction* instr, const char* detail, BranchType branch_type,
    Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  DCHECK(environment->HasBeenRegistered());
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(x1, x1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    DCHECK(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
                             instr->Mnemonic(), detail);
  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to build frame, or restore caller doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(reason);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
                                                 !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ B(&jump_table_.last()->label, branch_type, reg, bit);
  }
}


void LCodeGen::Deoptimize(LInstruction* instr,
                          Deoptimizer::BailoutType* override_bailout_type,
                          const char* detail) {
  DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
}


void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            const char* detail) {
  DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
                                const char* detail) {
  DeoptimizeBranch(instr, detail, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                                   const char* detail) {
  DeoptimizeBranch(instr, detail, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
                                    const char* detail) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
}


void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
                               const char* detail) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                                  const char* detail) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
}


void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                                LInstruction* instr, const char* detail) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, instr, detail);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                                   LInstruction* instr, const char* detail) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, instr, detail);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                                     const char* detail) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, instr, detail);
}


void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, "not heap number");
}


void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                                  const char* detail) {
  DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
}


void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                                    const char* detail) {
  DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  DCHECK((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());
}


Operand LCodeGen::ToOperand(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


Operand LCodeGen::ToOperand32(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      return Operand(constant->Integer32Value());
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}


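// Spill slot indices for incoming parameters are negative. When no frame has
// been built, they are addressed relative to the stack pointer; index -1
// maps to the slot closest to it.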
static int64_t ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
  DCHECK(op != NULL);
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    int fp_offset = StackSlotOffset(op->index());
    if (op->index() >= 0) {
      // Loads and stores have a bigger reach in positive offset than negative.
      // When the load or the store can't be done in one instruction via fp
      // (too big negative offset), we try to access via jssp (positive offset).
      // We can reference a stack slot from jssp only if jssp references the end
      // of the stack slots. That is not the case when:
      //  - stack_mode != kCanUseStackPointer: deferred code has saved the
      //    registers.
      //  - after_push_argument_: arguments have been pushed for a call.
      //  - inlined_arguments_: inlined arguments have been pushed once. All the
      //    remainder of the function cannot trust jssp any longer.
      //  - saves_caller_doubles: some double registers have been pushed, jssp
      //    references the end of the double registers and not the end of the
      //    stack slots.
      // Also, if the offset from fp is small enough to make a load/store in
      // one instruction, we use an fp access.
      if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
          !inlined_arguments_ && !is_int9(fp_offset) &&
          !info()->saves_caller_doubles()) {
        int jssp_offset =
            (GetStackSlotCount() - op->index() - 1) * kPointerSize;
        return MemOperand(masm()->StackPointer(), jssp_offset);
      }
    }
    return MemOperand(fp, fp_offset);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


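// Build the right-hand operand of a 32-bit instruction. If the instruction
// carries shift info, the (constant) shift amount is folded into the Operand
// itself, so no separate shift instruction is needed.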
template <class LI>
Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
  if (shift_info->shift() == NO_SHIFT) {
    return ToOperand32(right);
  } else {
    return Operand(
        ToRegister32(right),
        shift_info->shift(),
        JSShiftAmountFromLConstant(shift_info->shift_amount()));
  }
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


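// Map a comparison token to an ARM64 condition code; for example, Token::LT
// yields 'lt' for signed comparisons and 'lo' for unsigned ones.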
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


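// Emit the branch for a branching instruction, picking the normal or the
// inverted form so that whichever successor block is emitted next can be
// reached by falling through, avoiding an unconditional jump when possible.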
template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    if (right_block != next_block) {
      __ B(chunk_->GetAssemblyLabel(right_block));
    }
  }
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  DCHECK((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  DCHECK((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  DCHECK((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    if (loc != 0) {
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
    } else {
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
  }
}


void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = (instr->right()->IsConstantOperand())
      ? ToInteger32(LConstantOperand::cast(instr->right()))
      : Operand(ToRegister32(instr->right()), SXTW);

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);
}


void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr);
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr);
  } else {
    __ Add(result, left, right);
  }
}


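// Try an inline (bump-pointer) allocation and fall back to the deferred
// runtime path when it fails or when the constant size is too large for an
// inline allocation.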
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
    } else {
      __ B(deferred->entry());
    }
  } else {
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
  }

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register filler_count = temp1;
    Register filler = temp2;
    Register untagged_result = ToRegister(instr->temp3());

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Mov(filler_count, size / kPointerSize);
    } else {
      __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
    }

    __ Sub(untagged_result, result, kHeapObjectTag);
    __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ FillFields(untagged_result, filler_count, filler);
  } else {
    DCHECK(instr->temp3() == NULL);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  // We're in a SafepointRegistersScope so we can use any scratch registers.
  Register size = x0;
  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
  } else {
    __ SmiTag(size, ToRegister32(instr->size()).X());
  }
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Mov(x10, Smi::FromInt(flags));
  __ Push(size, x10);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
}

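// DoApplyArguments copies the arguments from the elements backing store
// onto the stack in a descending loop and then invokes the function.
// Argument counts above kArgumentsLimit deoptimize rather than risk
// overflowing the stack. Note how the receiver register (x0) is recycled
// to hold the argument count once the receiver itself has been pushed.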
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister32(instr->length());

  Register elements = ToRegister(instr->elements());
  Register scratch = x5;
  DCHECK(receiver.Is(x0));  // Used for parameter count.
  DCHECK(function.Is(x1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr);

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ Push(receiver);
  Register argc = receiver;
  receiver = NoReg;
  __ Sxtw(argc, length);
  // The arguments are at a one pointer size offset from elements.
  __ Add(elements, elements, 1 * kPointerSize);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Cbz(length, &invoke);
  __ Bind(&loop);
  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
  __ Push(scratch);
  __ Subs(length, length, 1);
  __ B(ne, &loop);

  __ Bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in argc (receiver) which is x0, as
  // expected by InvokeFunction.
  ParameterCount actual(argc);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  // We push some arguments here and they will be popped in another block.
  // We can't trust that jssp references the end of the stack slots until
  // the end of the function.
  inlined_arguments_ = true;
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // When we are inside an inlined function, the arguments are the last
    // things that have been pushed on the stack. Therefore the arguments
    // array can be accessed directly from jssp.
    // However, in the normal case it is accessed via fp, and there are two
    // words on the stack between fp and the arguments (the saved lr and fp)
    // which the LAccessArgumentsAt implementation takes into account.
    // In the inlined case we need to subtract two words from jssp to get a
    // pointer which will work well with LAccessArgumentsAt.
    DCHECK(masm()->StackPointer().Is(jssp));
    __ Sub(result, jssp, 2 * kPointerSize);
  } else {
    DCHECK(instr->temp() != NULL);
    Register previous_fp = ToRegister(instr->temp());

    __ Ldr(previous_fp,
           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ Ldr(result,
           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ Csel(result, fp, previous_fp, ne);
  }
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister32(instr->result());
  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());
  __ B(eq, &done);

  // An arguments adaptor frame is present. Get the argument length from it.
  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ Ldr(result,
         UntagSmiMemOperand(result.X(),
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // The argument length is in the result register.
  __ Bind(&done);
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());

  switch (instr->op()) {
    case Token::ADD: __ Fadd(result, left, right); break;
    case Token::SUB: __ Fsub(result, left, right); break;
    case Token::MUL: __ Fmul(result, left, right); break;
    case Token::DIV: __ Fdiv(result, left, right); break;
    case Token::MOD: {
      // The ECMA-262 remainder operator is the remainder from a truncating
      // (round-towards-zero) division. Note that this differs from IEEE-754.
      //
      // TODO(jbramley): See if it's possible to do this inline, rather than by
      // calling a helper function. With frintz (to produce the intermediate
      // quotient) and fmsub (to calculate the remainder without loss of
      // precision), it should be possible. However, we would need support for
      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
      // support that yet.
      DCHECK(left.Is(d0));
      DCHECK(right.Is(d1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      DCHECK(result.Is(d0));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(x1));
  DCHECK(ToRegister(instr->right()).is(x0));
  DCHECK(ToRegister(instr->result()).is(x0));

  Handle<Code> code =
      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBitI(LBitI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}

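// DoBoundsCheck uses unsigned conditions (hs/hi), so a negative int32
// index, seen as a large unsigned value, fails the check as well. When the
// index is the constant operand, the operands are swapped and the condition
// commuted, since the immediate must go on the right-hand side of Cmp.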
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand32(instr->index());
    Register length = ToRegister32(instr->length());
    __ Cmp(length, index);
    cond = CommuteCondition(cond);
  } else {
    Register index = ToRegister32(instr->index());
    Operand length = ToOperand32(instr->length());
    __ Cmp(index, length);
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
  } else {
    DeoptimizeIf(cond, instr);
  }
}

void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);

  if (r.IsInteger32()) {
    DCHECK(!info()->IsStub());
    EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
  } else if (r.IsSmi()) {
    DCHECK(!info()->IsStub());
    STATIC_ASSERT(kSmiTag == 0);
    EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
  } else if (r.IsDouble()) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchIfNonZeroNumber(instr, value, double_scratch());
  } else {
    DCHECK(r.IsTagged());
    Register value = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();

    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ CompareRoot(value, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitGoto(instr->TrueDestination(chunk()));
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      __ Ldr(double_scratch(), FieldMemOperand(value,
                                               HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      Register temp = ToRegister(instr->temp1());
      __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
      EmitCompareAndBranch(instr, ne, temp, 0);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ JumpIfRoot(
            value, Heap::kUndefinedValueRootIndex, false_label);
      }

      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ JumpIfRoot(
            value, Heap::kTrueValueRootIndex, true_label);
        __ JumpIfRoot(
            value, Heap::kFalseValueRootIndex, false_label);
      }

      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ JumpIfRoot(
            value, Heap::kNullValueRootIndex, false_label);
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all others -> true.
        DCHECK(Smi::FromInt(0) == 0);
        __ Cbz(value, false_label);
        __ JumpIfSmi(value, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a smi, deopt.
        DeoptimizeIfSmi(value, instr);
      }

      Register map = NoReg;
      Register scratch = NoReg;

      if (expected.NeedsMap()) {
        DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
        map = ToRegister(instr->temp1());
        scratch = ToRegister(instr->temp2());

        __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
          __ TestAndBranchIfAnySet(
              scratch, 1 << Map::kIsUndetectable, false_label);
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
        __ B(ge, true_label);
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
        __ B(ge, &not_string);
        __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
        __ Cbz(scratch, false_label);
        __ B(true_label);
        __ Bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
        __ B(eq, true_label);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        Label not_heap_number;
        __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);

        __ Ldr(double_scratch(),
               FieldMemOperand(value, HeapNumber::kValueOffset));
        __ Fcmp(double_scratch(), 0.0);
        // If we got a NaN (overflow bit is set), jump to the false branch.
        __ B(vs, false_label);
        __ B(eq, false_label);
        __ B(true_label);
        __ Bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        Deoptimize(instr);
      }
    }
  }
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 Register function_reg) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  // The function interface relies on the following register assignments.
  DCHECK(function_reg.Is(x1) || function_reg.IsNone());
  Register arity_reg = x0;

  LPointerMap* pointers = instr->pointer_map();

  // If necessary, load the function object.
  if (function_reg.IsNone()) {
    function_reg = x1;
    __ LoadObject(function_reg, function);
  }

  if (FLAG_debug_code) {
    Label is_not_smi;
    // Try to confirm that function_reg (x1) is a tagged pointer.
    __ JumpIfNotSmi(function_reg, &is_not_smi);
    __ Abort(kExpectedFunctionObject);
    __ Bind(&is_not_smi);
  }

  if (can_invoke_directly) {
    // Change context.
    __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Set the arguments count if adaptation is not needed. Assumes that x0 is
    // available to write to at this point.
    if (dont_adapt_arguments) {
      __ Mov(arity_reg, arity);
    }

    // Invoke function.
    __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
    __ Call(x10);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
  }
}

void LCodeGen::DoTailCallThroughMegamorphicCache(
    LTailCallThroughMegamorphicCache* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register name = ToRegister(instr->name());
  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(name.is(LoadDescriptor::NameRegister()));
  DCHECK(receiver.is(x1));
  DCHECK(name.is(x2));

  Register scratch = x3;
  Register extra = x4;
  Register extra2 = x5;
  Register extra3 = x6;

  // Important for the tail-call.
  bool must_teardown_frame = NeedsEagerFrame();

  // The probe will tail call to a handler if found.
  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
                                         must_teardown_frame, receiver, name,
                                         scratch, extra, extra2, extra3);

  // Tail call to miss if we ended up here.
  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
  LoadIC::GenerateMiss(masm());
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->result()).Is(x0));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    // TODO(all): on ARM we use a call descriptor to specify a storage mode
    // but on ARM64 we only have one storage mode so it isn't necessary. Check
    // this understanding is correct.
    __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
  } else {
    DCHECK(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
    __ Call(target);
  }
  generator.AfterCall();
  after_push_argument_ = false;
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->function()).is(x1));

  if (instr->hydrogen()->pass_argument_count()) {
    __ Mov(x0, Operand(instr->arity()));
  }

  // Change context.
  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));

  // Load the code entry address.
  __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
  __ Call(x10);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  after_push_argument_ = false;
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
  after_push_argument_ = false;
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(x0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
  after_push_argument_ = false;
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Mov(cp, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, temp);
  }
  DeoptimizeIfSmi(temp, instr);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps: public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register object = ToRegister(instr->value());
  Register map_reg = ToRegister(instr->temp());

  __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, object);
    __ Bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map);
    __ B(eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map);

  // We didn't match a map.
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ B(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr);
  }

  __ Bind(&success);
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    DeoptimizeIfSmi(ToRegister(instr->value()), instr);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  Register value = ToRegister(instr->value());
  DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
  DeoptimizeIfNotSmi(value, instr);
}

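// DoCheckInstanceType checks an object's instance type either as an
// interval or as a mask-and-tag pair. A closed interval costs one Cmp plus
// one Ccmp; an interval ending at LAST_TYPE needs only the lower-bound
// compare; and a power-of-two mask collapses to a single bit test via
// DeoptimizeIfBitSet/DeoptimizeIfBitClear.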
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first, last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ Cmp(scratch, first);
    if (first == last) {
      // If there is only one type in the interval, check for equality.
      DeoptimizeIf(ne, instr);
    } else if (last == LAST_TYPE) {
      // We don't need to compare with the higher bound of the interval.
      DeoptimizeIf(lo, instr);
    } else {
      // If we are below the lower bound, set the C flag and clear the Z flag
      // to force a deopt.
      __ Ccmp(scratch, last, CFlag, hs);
      DeoptimizeIf(hi, instr);
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK((tag == 0) || (tag == mask));
      if (tag == 0) {
        DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr);
      } else {
        DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr);
      }
    } else {
      if (tag == 0) {
        __ Tst(scratch, mask);
      } else {
        __ And(scratch, scratch, mask);
        __ Cmp(scratch, tag);
      }
      DeoptimizeIf(ne, instr);
    }
  }
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister input = ToDoubleRegister(instr->unclamped());
  Register result = ToRegister32(instr->result());
  __ ClampDoubleToUint8(result, input, double_scratch());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register input = ToRegister32(instr->unclamped());
  Register result = ToRegister32(instr->result());
  __ ClampInt32ToUint8(result, input);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register input = ToRegister(instr->unclamped());
  Register result = ToRegister32(instr->result());
  Label done;

  // Both smi and heap number cases are handled.
  Label is_not_smi;
  __ JumpIfNotSmi(input, &is_not_smi);
  __ SmiUntag(result.X(), input);
  __ ClampInt32ToUint8(result);
  __ B(&done);

  __ Bind(&is_not_smi);

  // Check for heap number.
  Label is_heap_number;
  __ JumpIfHeapNumber(input, &is_heap_number);

  // Check for undefined. Undefined is converted to zero for the clamping
  // conversion.
  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
  __ Mov(result, 0);
  __ B(&done);

  // Heap number case.
  __ Bind(&is_heap_number);
  DoubleRegister dbl_scratch = double_scratch();
  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
  __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);

  __ Bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ Fmov(result_reg, value_reg);
    __ Lsr(result_reg, result_reg, 32);
  } else {
    __ Fmov(result_reg.W(), value_reg.S());
  }
}

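// DoConstructDouble is the inverse of DoDoubleBits: Bfi inserts the low 32
// bits of hi_reg into bits [63:32] of lo_reg, so lo_reg then holds the
// complete 64-bit bit pattern, which Fmov transfers unchanged into the
// floating point result register.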
void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());

  // Insert the least significant 32 bits of hi_reg into the most significant
  // 32 bits of lo_reg, and move to a floating point register.
  __ Bfi(lo_reg, hi_reg, 32, 32);
  __ Fmov(result_reg, lo_reg);
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Handle<String> class_name = instr->hydrogen()->class_name();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register input = ToRegister(instr->value());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  __ JumpIfSmi(input, false_label);

  Register map = scratch2;
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    // We expect CompareObjectType to load the object instance type in
    // scratch1.
    __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
    __ B(lt, false_label);
    __ B(eq, true_label);
    __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
    __ B(eq, true_label);
  } else {
    __ IsObjectJSObjectType(input, map, scratch1, false_label);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ JumpIfNotObjectType(
        scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
  } else {
    __ JumpIfNotObjectType(
        scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
  }

  // The constructor function is in scratch1. Get its instance class name.
  __ Ldr(scratch1,
         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(scratch1,
         FieldMemOperand(scratch1,
                         SharedFunctionInfo::kInstanceClassNameOffset));

  // The class name we are testing against is internalized since it's a
  // literal. The name in the constructor is internalized because of the way
  // the context is booted. This routine isn't expected to work for random
  // API-created classes and it doesn't have to because you can't access it
  // with natives syntax. Since both sides are internalized it is sufficient
  // to use an identity comparison.
  EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
}

void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
  DCHECK(instr->hydrogen()->representation().IsDouble());
  FPRegister object = ToDoubleRegister(instr->object());
  Register temp = ToRegister(instr->temp());

  // If we don't have a NaN, we don't have the hole, so branch now to avoid
  // the (relatively expensive) hole-NaN check.
  __ Fcmp(object, object);
  __ B(vc, instr->FalseLabel(chunk_));

  // We have a NaN, but is it the hole?
  __ Fmov(temp, object);
  EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
}


void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
  DCHECK(instr->hydrogen()->representation().IsTagged());
  Register object = ToRegister(instr->object());

  EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register value = ToRegister(instr->value());
  Register map = ToRegister(instr->temp());

  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
}


void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  DCHECK(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
                       instr->TrueLabel(chunk()));
  } else {
    Register value = ToRegister(instr->value());
    __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
    __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
    __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
  }
  EmitGoto(instr->FalseDestination(chunk()));
}

void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));

      // If a NaN is involved, i.e. the result is unordered (V set),
      // jump to the false block label.
      __ B(vs, instr->FalseLabel(chunk_));
      EmitBranch(instr, cond);
    } else {
      if (instr->hydrogen_value()->representation().IsInteger32()) {
        if (right->IsConstantOperand()) {
          EmitCompareAndBranch(instr, cond, ToRegister32(left),
                               ToOperand32(right));
        } else {
          // Commute the operands and the condition.
          EmitCompareAndBranch(instr, CommuteCondition(cond),
                               ToRegister32(right), ToOperand32(left));
        }
      } else {
        DCHECK(instr->hydrogen_value()->representation().IsSmi());
        if (right->IsConstantOperand()) {
          int32_t value = ToInteger32(LConstantOperand::cast(right));
          EmitCompareAndBranch(instr,
                               cond,
                               ToRegister(left),
                               Operand(Smi::FromInt(value)));
        } else if (left->IsConstantOperand()) {
          // Commute the operands and the condition.
          int32_t value = ToInteger32(LConstantOperand::cast(left));
          EmitCompareAndBranch(instr,
                               CommuteCondition(cond),
                               ToRegister(right),
                               Operand(Smi::FromInt(value)));
        } else {
          EmitCompareAndBranch(instr,
                               cond,
                               ToRegister(left),
                               ToRegister(right));
        }
      }
    }
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());
  EmitCompareAndBranch(instr, eq, left, right);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();
  Condition cond = TokenToCondition(op, false);

  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // Signal that we don't inline smi code before this stub.
  InlineSmiCheckInfo::EmitNotInlined(masm());

  // Return true or false depending on the CompareIC result.
  // This instruction is marked as call. We can clobber any register.
  DCHECK(instr->IsMarkedAsCall());
  __ LoadTrueFalseRoots(x1, x2);
  __ Cmp(x0, 0);
  __ Csel(ToRegister(instr->result()), x1, x2, cond);
}

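// DoConstantD needs care for zero: +0.0 and -0.0 compare equal as doubles,
// so copysign(1.0, value) is used to recover the sign bit and choose
// between fp_zero and its negation. Non-zero constants are loaded with a
// plain Fmov.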
void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  if (instr->value() == 0) {
    if (copysign(1.0, instr->value()) == 1.0) {
      __ Fmov(result, fp_zero);
    } else {
      __ Fneg(result, fp_zero);
    }
  } else {
    __ Fmov(result, instr->value());
  }
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  DCHECK(is_int32(instr->value()));
  // Cast the value here to ensure that the value isn't sign extended by the
  // implicit Operand constructor.
  __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(ToRegister(instr->result()), object);
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    UseScratchRegisterScope temps(masm());
    Register temp = temps.AcquireX();
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ Mov(temp, Operand(Handle<Object>(cell)));
    __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
    __ Cmp(reg, temp);
  } else {
    __ Cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr);
}

void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register temp1 = x10;
  Register temp2 = x11;
  Smi* index = instr->index();
  Label runtime, done;

  DCHECK(object.is(result) && object.Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  DeoptimizeIfSmi(object, instr);
  __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
  DeoptimizeIf(ne, instr);

  if (index->value() == 0) {
    __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ Mov(temp1, Operand(stamp));
      __ Ldr(temp1, MemOperand(temp1));
      __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Cmp(temp1, temp2);
      __ B(ne, &runtime);
      __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                             kPointerSize * index->value()));
      __ B(&done);
    }

    __ Bind(&runtime);
    __ Mov(x1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
  }

  __ Bind(&done);
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
    type = Deoptimizer::LAZY;
  }

  Deoptimize(instr, &type, instr->hydrogen()->reason());
}

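// DoDivByPowerOf2I emits a truncating signed division by 2^shift without
// Sdiv. A plain arithmetic shift would round negative dividends towards
// negative infinity, so a bias of (2^shift - 1), derived from the sign
// bits, is added first. For example, for divisor 4 (shift 2) and dividend
// -7: bias = 3, and (-7 + 3) >> 2 = -1, the correctly truncated quotient.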
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    DeoptimizeIf(vs, instr);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ Tst(dividend, mask);
    DeoptimizeIf(ne, instr);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Neg(result, dividend);
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Mov(result, dividend);
  } else if (shift == 1) {
    __ Add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    __ Mov(result, Operand(dividend, ASR, 31));
    __ Add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ Neg(result, result);
}

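// DoDivByConstI divides by an arbitrary non-zero constant via
// TruncatingDiv, which (as far as this call site is concerned) multiplies
// by a precomputed fixed-point reciprocal instead of issuing a divide
// instruction. When the result must be exact, the remainder is recomputed
// with Smsubl (dividend - result * divisor) and a non-zero remainder
// deoptimizes.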
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register temp = ToRegister32(instr->temp());
    DCHECK(!AreAliased(dividend, result, temp));
    __ Sxtw(dividend.X(), dividend);
    __ Mov(temp, divisor);
    __ Smsubl(temp.X(), result, temp, dividend.X());
    DeoptimizeIfNotZero(temp, instr);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
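// In DoDivI below, Sdiv is issued first so the deopt checks overlap with
// the division. The Ccmp sequences fuse two tests into one conditional
// deopt each: for the -0 case, the dividend is only compared with zero if
// the divisor was negative (mi), so the single eq-deopt fires exactly for
// "0 / negative".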
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register result = ToRegister32(instr->result());

  // Issue the division first, and then check for any deopt cases whilst the
  // result is computed.
  __ Sdiv(result, dividend, divisor);

  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DCHECK_EQ(NULL, instr->temp());
    return;
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr);
  }

  // Check for (0 / -x) as that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);

    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
    // zero, i.e. a zero dividend with a negative divisor deopts.
    // If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, i.e. a positive divisor doesn't
    // deopt.
    __ Ccmp(dividend, 0, NoFlag, mi);
    DeoptimizeIf(eq, instr);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
    // -1. If overflow is clear, set the flags for condition ne, as the
    // dividend isn't kMinInt, and thus we shouldn't deopt.
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr);
  }

  // Compute the remainder and deopt if it's not zero.
  Register remainder = ToRegister32(instr->temp());
  __ Msub(remainder, result, divisor, dividend);
  DeoptimizeIfNotZero(remainder, instr);
}

void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister32(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr);
  }

  __ TryRepresentDoubleAsInt32(result, input, double_scratch());
  DeoptimizeIf(ne, instr);

  if (instr->tag_result()) {
    __ SmiTag(result.X());
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // The FunctionLiteral instruction is marked as a call; we can trash any
  // register.
  DCHECK(instr->IsMarkedAsCall());

  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->kind());
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    __ Mov(x1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, x2, x1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}

void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;

  __ EnumLengthUntagged(result, map);
  __ Cbnz(result, &load_cache);

  __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ B(&done);

  __ Bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIfZero(result, instr);

  __ Bind(&done);
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register object = ToRegister(instr->object());
  Register null_value = x5;

  DCHECK(instr->IsMarkedAsCall());
  DCHECK(object.Is(x0));

  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr);

  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ Cmp(object, null_value);
  DeoptimizeIf(eq, instr);

  DeoptimizeIfSmi(object, instr);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr);

  Label use_cache, call_runtime;
  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);

  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
  __ B(&use_cache);

  // Get the set of properties to enumerate.
  __ Bind(&call_runtime);
  __ Push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr);

  __ Bind(&use_cache);
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  // Assert that we can use a W register load to get the hash.
  DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
  __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}

void LCodeGen::EmitGoto(int block) {
  // Do not emit a jump if we are emitting a goto to the next block.
  if (!IsNextEmittedBlock(block)) {
    __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister32(instr->temp());

  // Assert that the cache status bits fit in a W register.
  DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
  __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
  __ Tst(temp, String::kContainsCachedArrayIndexMask);
  EmitBranch(instr, eq);
}


// The HHasInstanceTypeAndBranch instruction is built with an interval of
// types to test, but is only used in very restricted ways. The only
// possible kinds of intervals are:
//  - [ FIRST_TYPE, instr->to() ]
//  - [ instr->from(), LAST_TYPE ]
//  - instr->from() == instr->to()
//
// These kinds of intervals can be checked with only one compare instruction,
// provided the correct value and test condition are used.
//
// TestType() will return the value to use in the compare instruction and
// BranchCondition() will return the condition to use depending on the kind
// of interval actually specified in the instruction.
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK((from == to) || (to == LAST_TYPE));
  return from;
}


// See the comment above TestType for what this function does.
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    __ Add(result, base, ToOperand32(instr->offset()));
  } else {
    __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
  }
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Assert that the arguments are in the registers expected by InstanceofStub.
  DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
  DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));

  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  // InstanceofStub returns a result in x0:
  //   0     => not an instance
  //   smi 1 => instance.
  __ Cmp(x0, 0);
  __ LoadTrueFalseRoots(x0, x1);
  __ Csel(x0, x0, x1, eq);
}

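// DoInstanceOfKnownGlobal inlines a patchable instanceof cache at the call
// site: a cell holding the last map checked, and a hole constant that is
// later overwritten with the cached result. The InstructionAccurateScope in
// the body pins the sequence to an exact instruction count so that the stub
// can locate and patch these sites reliably.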
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LInstanceOfKnownGlobal* instr_;
  };

  DeferredInstanceOfKnownGlobal* deferred =
      new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label map_check, return_false, cache_miss, done;
  Register object = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  // x4 is expected in the associated deferred code and stub.
  Register map_check_site = x4;
  Register map = x5;

  // This instruction is marked as call. We can clobber any register.
  DCHECK(instr->IsMarkedAsCall());

  // We must take into account that object is in x11.
  DCHECK(object.Is(x11));
  Register scratch = x10;

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &return_false);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  {
    // Below we use Factory::the_hole_value() on purpose instead of loading
    // from the root array to force relocation and later be able to patch
    // with a custom value.
    InstructionAccurateScope scope(masm(), 5);
    __ bind(&map_check);
    // Will be patched with the cached map.
    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ ldr(scratch, Immediate(Handle<Object>(cell)));
    __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
    __ cmp(map, scratch);
    __ b(&cache_miss, ne);
    // The address of this instruction is computed relative to the map check
    // above, so check the size of the code generated.
    DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
    // Will be patched with the cached result.
    __ ldr(result, Immediate(factory()->the_hole_value()));
  }
  __ B(&done);

  // The inlined call site cache did not match.
  // Check null and string before calling the deferred code.
  __ Bind(&cache_miss);
  // Compute the address of the map check. It must not be clobbered until the
  // InstanceOfStub has used it.
  __ Adr(map_check_site, &map_check);
  // Null is not an instance of anything.
  __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);

  // String values are not instances of anything.
  // Return false if the object is a string. Otherwise, jump to the deferred
  // code.
  // Note that we can't jump directly to the deferred code from
  // IsObjectJSStringType, because it uses tbz for the jump and the deferred
  // code can be out of range.
  __ IsObjectJSStringType(object, scratch, NULL, &return_false);
  __ B(deferred->entry());

  __ Bind(&return_false);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result is either true or false.
  __ Bind(deferred->exit());
  __ Bind(&done);
}

DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal * instr)3135 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3136 Register result = ToRegister(instr->result());
3137 DCHECK(result.Is(x0)); // InstanceofStub returns its result in x0.
3138 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3139 flags = static_cast<InstanceofStub::Flags>(
3140 flags | InstanceofStub::kArgsInRegisters);
3141 flags = static_cast<InstanceofStub::Flags>(
3142 flags | InstanceofStub::kReturnTrueFalseObject);
3143 flags = static_cast<InstanceofStub::Flags>(
3144 flags | InstanceofStub::kCallSiteInlineCheck);
3145
3146 PushSafepointRegistersScope scope(this);
3147 LoadContextFromDeferred(instr->context());
3148
3149 // Prepare InstanceofStub arguments.
3150 DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
3151 __ LoadObject(InstanceofStub::right(), instr->function());
3152
3153 InstanceofStub stub(isolate(), flags);
3154 CallCodeGeneric(stub.GetCode(),
3155 RelocInfo::CODE_TARGET,
3156 instr,
3157 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3158 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3159 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3160
3161 // Put the result value into the result register slot.
3162 __ StoreToSafepointRegisterSlot(result, result);
3163 }
3164
3165
DoInstructionGap(LInstructionGap * instr)3166 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3167 DoGap(instr);
3168 }
3169
3170
DoInteger32ToDouble(LInteger32ToDouble * instr)3171 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3172 Register value = ToRegister32(instr->value());
3173 DoubleRegister result = ToDoubleRegister(instr->result());
3174 __ Scvtf(result, value);
3175 }
3176
3177
DoInvokeFunction(LInvokeFunction * instr)3178 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3179 DCHECK(ToRegister(instr->context()).is(cp));
3180 // The function is required to be in x1.
3181 DCHECK(ToRegister(instr->function()).is(x1));
3182 DCHECK(instr->HasPointerMap());
3183
3184 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3185 if (known_function.is_null()) {
3186 LPointerMap* pointers = instr->pointer_map();
3187 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3188 ParameterCount count(instr->arity());
3189 __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3190 } else {
3191 CallKnownFunction(known_function,
3192 instr->hydrogen()->formal_parameter_count(),
3193 instr->arity(),
3194 instr,
3195 x1);
3196 }
3197 after_push_argument_ = false;
3198 }
3199
3200
DoIsConstructCallAndBranch(LIsConstructCallAndBranch * instr)3201 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
3202 Register temp1 = ToRegister(instr->temp1());
3203 Register temp2 = ToRegister(instr->temp2());
3204
3205 // Get the frame pointer for the calling frame.
3206 __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3207
3208 // Skip the arguments adaptor frame if it exists.
3209 Label check_frame_marker;
3210 __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
3211 __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3212 __ B(ne, &check_frame_marker);
3213 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
3214
3215 // Check the marker in the calling frame.
3216 __ Bind(&check_frame_marker);
3217 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
3218
3219 EmitCompareAndBranch(
3220 instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3221 }
3222
3223
DoIsObjectAndBranch(LIsObjectAndBranch * instr)3224 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
3225 Label* is_object = instr->TrueLabel(chunk_);
3226 Label* is_not_object = instr->FalseLabel(chunk_);
3227 Register value = ToRegister(instr->value());
3228 Register map = ToRegister(instr->temp1());
3229 Register scratch = ToRegister(instr->temp2());
3230
3231 __ JumpIfSmi(value, is_not_object);
3232 __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
3233
3234 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3235
3236 // Check for undetectable objects.
3237 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
3238 __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
3239
3240 // Check that instance type is in object type range.
3241 __ IsInstanceJSObjectType(map, scratch, NULL);
3242 // Flags have been updated by IsInstanceJSObjectType. We can now test the
3243 // flags for "le" condition to check if the object's type is a valid
3244 // JS object type.
3245 EmitBranch(instr, le);
3246 }
3247
3248
EmitIsString(Register input,Register temp1,Label * is_not_string,SmiCheck check_needed=INLINE_SMI_CHECK)3249 Condition LCodeGen::EmitIsString(Register input,
3250 Register temp1,
3251 Label* is_not_string,
3252 SmiCheck check_needed = INLINE_SMI_CHECK) {
3253 if (check_needed == INLINE_SMI_CHECK) {
3254 __ JumpIfSmi(input, is_not_string);
3255 }
3256 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
3257
3258 return lt;
3259 }
3260
3261
DoIsStringAndBranch(LIsStringAndBranch * instr)3262 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
3263 Register val = ToRegister(instr->value());
3264 Register scratch = ToRegister(instr->temp());
3265
3266 SmiCheck check_needed =
3267 instr->hydrogen()->value()->type().IsHeapObject()
3268 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3269 Condition true_cond =
3270 EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
3271
3272 EmitBranch(instr, true_cond);
3273 }
3274
3275
DoIsSmiAndBranch(LIsSmiAndBranch * instr)3276 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
3277 Register value = ToRegister(instr->value());
3278 STATIC_ASSERT(kSmiTag == 0);
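  // Worked example (added for clarity; illustrative, assuming the 64-bit smi
  // layout asserted above): the smi 42 is encoded as 42 << 32, so its low bit
  // (kSmiTagMask == 1) is clear, while heap object pointers always have the
  // low tag bit set. Testing "value & kSmiTagMask" for zero therefore
  // distinguishes smis from heap objects.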
  EmitTestAndBranch(instr, eq, value, kSmiTagMask);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));

  EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));

  __ Bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
    } else {
      Label not_the_hole;
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ Bind(&not_the_hole);
    }
  }
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Get the prototype or initial map from the function.
  __ Ldr(result, FieldMemOperand(function,
                                 JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, temp, temp, MAP_TYPE);
  __ B(ne, &done);

  // Get the prototype from the initial map.
  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ Bind(&done);
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
  }
}


template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  DCHECK(FLAG_vector_ics);
  Register vector = ToRegister(instr->temp_vector());
  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
  __ Mov(vector, instr->hydrogen()->feedback_vector());
  // No need to allocate this register.
  DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
  __ Mov(VectorLoadICDescriptor::SlotRegister(),
         Smi::FromInt(instr->hydrogen()->slot()));
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->global_object())
             .is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).Is(x0));
  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  }
  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
    Register key,
    Register base,
    Register scratch,
    bool key_is_smi,
    bool key_is_constant,
    int constant_key,
    ElementsKind elements_kind,
    int base_offset) {
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
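  // Worked example (added for clarity; values are illustrative): for
  // INT32_ELEMENTS the shift is 2, so a constant key of 10 addresses
  // base + (10 << 2) + base_offset = base + 40 + base_offset, and a register
  // key is scaled the same way via the SXTW-shifted addressing modes below.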

  if (key_is_constant) {
    int key_offset = constant_key << element_size_shift;
    return MemOperand(base, key_offset + base_offset);
  }

  if (key_is_smi) {
    __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
    return MemOperand(scratch, base_offset);
  }

  if (base_offset == 0) {
    return MemOperand(base, key, SXTW, element_size_shift);
  }

  DCHECK(!AreAliased(scratch, key));
  __ Add(scratch, base, base_offset);
  return MemOperand(scratch, key, SXTW, element_size_shift);
}


void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register scratch;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  int constant_key = 0;
  if (key_is_constant) {
    DCHECK(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    scratch = ToRegister(instr->temp());
    key = ToRegister(instr->key());
  }

  MemOperand mem_op =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->base_offset());

  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
      (elements_kind == FLOAT32_ELEMENTS)) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result.S(), mem_op);
    __ Fcvt(result, result.S());
  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
             (elements_kind == FLOAT64_ELEMENTS)) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result, mem_op);
  } else {
    Register result = ToRegister(instr->result());

    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ Ldrsb(result, mem_op);
        break;
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ Ldrb(result, mem_op);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ Ldrsh(result, mem_op);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Ldrh(result, mem_op);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ Ldrsw(result, mem_op);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Ldr(result.W(), mem_op);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          // Deopt if value >= 0x80000000, i.e. if it does not fit in int32.
          __ Tst(result, 0xFFFFFFFF80000000);
          DeoptimizeIf(ne, instr);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
                                              Register elements,
                                              Register key,
                                              bool key_is_tagged,
                                              ElementsKind elements_kind,
                                              Representation representation,
                                              int base_offset) {
  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
  STATIC_ASSERT(kSmiTag == 0);
  int element_size_shift = ElementsKindToShiftSize(elements_kind);

  // Even though the HLoad/StoreKeyed instructions force the input
  // representation for the key to be an integer, the input gets replaced
  // during bounds check elimination with the index argument to the bounds
  // check, which can be tagged, so that case must be handled here, too.
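  // Worked example (added for clarity; illustrative): a tagged key encodes
  // its index in the upper 32 bits (index << 32), so for a double array
  // (element_size_shift == 3) UntagSmiAndScale computes
  // elements + ((key >> 32) << 3), the same address an untagged index gives.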
  if (key_is_tagged) {
    __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
    if (representation.IsInteger32()) {
      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
      // Read or write only the smi payload in the case of fast smi arrays.
      return UntagSmiMemOperand(base, base_offset);
    } else {
      return MemOperand(base, base_offset);
    }
  } else {
    // Sign extend key because it could be a 32-bit negative value or contain
    // garbage in the top 32-bits. The address computation happens in 64-bit.
    DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
    if (representation.IsInteger32()) {
      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
      // Read or write only the smi payload in the case of fast smi arrays.
      __ Add(base, elements, Operand(key, SXTW, element_size_shift));
      return UntagSmiMemOperand(base, base_offset);
    } else {
      __ Add(base, elements, base_offset);
      return MemOperand(base, key, SXTW, element_size_shift);
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister result = ToDoubleRegister(instr->result());
  MemOperand mem_op;

  if (instr->key()->IsConstantOperand()) {
    DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
           (instr->temp() == NULL));

    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    int offset = instr->base_offset() + constant_key * kDoubleSize;
    mem_op = MemOperand(elements, offset);
  } else {
    Register load_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      instr->hydrogen()->representation(),
                                      instr->base_offset());
  }

  __ Ldr(result, mem_op);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register scratch = ToRegister(instr->temp());
    // Detect the hole NaN by adding one to the integer representation of the
    // result, and checking for overflow.
    STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
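    // Worked example (added for clarity): the hole NaN is the only value
    // whose bit pattern is 0x7fffffffffffffff, the maximum signed 64-bit
    // integer. Cmn computes scratch + 1, which overflows to
    // 0x8000000000000000 and sets the V flag for the hole, and for no other
    // double bit pattern.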
    __ Ldr(scratch, mem_op);
    __ Cmn(scratch, 1);
    DeoptimizeIf(vs, instr);
  }
}


void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  MemOperand mem_op;

  Representation representation = instr->hydrogen()->representation();
  if (instr->key()->IsConstantOperand()) {
    DCHECK(instr->temp() == NULL);
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset = instr->base_offset() +
        ToInteger32(const_operand) * kPointerSize;
    if (representation.IsInteger32()) {
      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
      STATIC_ASSERT(kSmiTag == 0);
      mem_op = UntagSmiMemOperand(elements, offset);
    } else {
      mem_op = MemOperand(elements, offset);
    }
  } else {
    Register load_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      representation, instr->base_offset());
  }

  __ Load(result, mem_op, representation);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      DeoptimizeIfNotSmi(result, instr);
    } else {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
    }
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  DCHECK(ToRegister(instr->result()).Is(x0));
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    __ Load(result, MemOperand(object, offset), access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    FPRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  Register source;
  if (access.IsInobject()) {
    source = object;
  } else {
    // Load the properties array, using result as a scratch register.
    __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    source = result;
  }

  if (access.representation().IsSmi() &&
      instr->hydrogen()->representation().IsInteger32()) {
    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
    STATIC_ASSERT(kSmiTag == 0);
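    // Worked example (added for clarity; assumes the little-endian layout
    // used on arm64): the smi 5 is stored as the 64-bit word
    // 0x0000000500000000, so a 32-bit load from field offset + 4 (the address
    // UntagSmiFieldMemOperand forms) reads the payload 5 directly, with no
    // untagging shift needed.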
    __ Load(result, UntagSmiFieldMemOperand(source, offset),
            Representation::Integer32());
  } else {
    __ Load(result, FieldMemOperand(source, offset), access.representation());
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // LoadIC expects name and receiver in registers.
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  DCHECK(ToRegister(instr->result()).is(x0));
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLengthSmi(result, map);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Fabs(result, input);
  } else if (r.IsSmi() || r.IsInteger32()) {
    Register input = r.IsSmi() ? ToRegister(instr->value())
                               : ToRegister32(instr->value());
    Register result = r.IsSmi() ? ToRegister(instr->result())
                                : ToRegister32(instr->result());
    __ Abs(result, input);
    DeoptimizeIf(vs, instr);
  }
}


void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                                       Label* exit,
                                       Label* allocation_entry) {
  // Handle the tricky cases of MathAbsTagged:
  //  - HeapNumber inputs.
  //    - Negative inputs produce a positive result, so a new HeapNumber is
  //      allocated to hold it.
  //    - Positive inputs are returned as-is, since there is no need to
  //      allocate a new HeapNumber for the result.
  //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit
  //    in a smi. In this case, the inline code sets the result and jumps
  //    directly to the allocation_entry label.
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());

  Label runtime_allocation;

  // Deoptimize if the input is not a HeapNumber.
  DeoptimizeIfNotHeapNumber(input, instr);

  // If the argument is positive, we can return it as-is, without any need to
  // allocate a new HeapNumber for the result. We have to do this in integer
  // registers (rather than with fabs) because we need to be able to
  // distinguish the two zeroes.
  __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
  __ Mov(result, input);
  __ Tbz(result_bits, kXSignBit, exit);

  // Calculate abs(input) by clearing the sign bit.
  __ Bic(result_bits, result_bits, kXSignMask);

  // Allocate a new HeapNumber to hold the result.
  //   result_bits   The bit representation of the (double) result.
  __ Bind(allocation_entry);
  __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
  // The inline (non-deferred) code will store result_bits into result.
  __ B(exit);

  __ Bind(&runtime_allocation);
  if (FLAG_debug_code) {
    // Because result is in the pointer map, we need to make sure it has a
    // valid tagged value before we call the runtime. We speculatively set it
    // to the input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it
    // should already be valid.
    Label result_ok;
    Register input = ToRegister(instr->value());
    __ JumpIfSmi(result, &result_ok);
    __ Cmp(input, result);
    __ Assert(eq, kUnexpectedValue);
    __ Bind(&result_ok);
  }

  { PushSafepointRegistersScope scope(this);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    __ StoreToSafepointRegisterSlot(x0, result);
  }
  // The inline (non-deferred) code will store result_bits into result.
}


void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
  // Class for deferred case.
  class DeferredMathAbsTagged: public LDeferredCode {
   public:
    DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTagged(instr_, exit(),
                                         allocation_entry());
    }
    virtual LInstruction* instr() { return instr_; }
    Label* allocation_entry() { return &allocation; }
   private:
    LMathAbsTagged* instr_;
    Label allocation;
  };

  // TODO(jbramley): The early-exit mechanism would skip the new frame handling
  // in GenerateDeferredCode. Tidy this up.
  DCHECK(!NeedsDeferredFrame());

  DeferredMathAbsTagged* deferred =
      new(zone()) DeferredMathAbsTagged(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
         instr->hydrogen()->value()->representation().IsSmi());
  Register input = ToRegister(instr->value());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());
  Label done;

  // Handle smis inline.
  // We can treat smis as 64-bit integers, since the (low-order) tag bits will
  // never get set by the negation. This is therefore the same as the Integer32
  // case in DoMathAbs, except that it operates on 64-bit values.
  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));

  __ JumpIfNotSmi(input, deferred->entry());

  __ Abs(result, input, NULL, &done);

  // The result is the magnitude (abs) of the smallest value a smi can
  // represent, encoded as a double.
  __ Mov(result_bits, double_to_rawbits(0x80000000));
  __ B(deferred->allocation_entry());

  __ Bind(deferred->exit());
  __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));

  __ Bind(&done);
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
  DoubleRegister double_temp2 = double_scratch();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register temp3 = ToRegister(instr->temp3());

  MathExpGenerator::EmitMathExp(masm(), input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
}


void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ Frintm(result, input);
}


void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr);
  }

  __ Fcvtms(result, input);

  // Check that the result fits into a 32-bit integer.
  //  - The result did not overflow.
  __ Cmp(result, Operand(result, SXTW));
  //  - The input was not NaN.
  __ Fccmp(input, input, NoFlag, eq);
  DeoptimizeIf(ne, instr);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Mov(result, dividend, kDiscardForSameWReg);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and
  // we can simply do an arithmetic right shift.
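  // Worked example (added for clarity): an arithmetic right shift rounds
  // towards minus infinity, which is exactly flooring division by a power of
  // two: 7 >> 1 == 3 == floor(7 / 2), and -7 >> 1 == -4 == floor(-7 / 2).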
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  __ Asr(result, result, shift);
  __ Csel(result, result, kMinInt / divisor, vc);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
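  // Worked example (added for clarity): floor(-7 / 2) is -4, but truncating
  // division yields -3. When the signs of dividend and divisor differ and the
  // division is inexact, the code below computes
  // trunc((-7 + 1) / 2) - 1 = -3 - 1 = -4, the flooring result.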
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(temp, dividend, result));
  Label needs_adjustment, done;
  __ Cmp(dividend, 0);
  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ B(&done);
  __ Bind(&needs_adjustment);
  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ Sub(result, result, Operand(1));
  __ Bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register remainder = ToRegister32(instr->temp());
  Register result = ToRegister32(instr->result());

  // Sdiv cannot raise an exception on ARM64 (division by zero simply yields
  // zero), so we can execute it speculatively, before the checks below.
  __ Sdiv(result, dividend, divisor);

  // Check for x / 0.
  DeoptimizeIfZero(divisor, instr);

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // The V flag will be set iff dividend == kMinInt.
    __ Cmp(dividend, 1);
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);
    __ Ccmp(dividend, 0, NoFlag, mi);
4015 // "divisor" can't be null because the code would have already been
4016 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
4017 // In this case we need to deoptimize to produce a -0.
    DeoptimizeIf(eq, instr);
  }

  Label done;
  // If both operands have the same sign then we are done.
  __ Eor(remainder, dividend, divisor);
  __ Tbz(remainder, kWSignBit, &done);

  // Check if the result needs to be corrected.
  __ Msub(remainder, result, divisor, dividend);
  __ Cbz(remainder, &done);
  __ Sub(result, result, 1);

  __ Bind(&done);
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToDoubleRegister(instr->value()).is(d0));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister32(instr->value());
  Register result = ToRegister32(instr->result());
  __ Clz(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  Label done;

  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
  //  Math.pow(-Infinity, 0.5) == +Infinity
  //  Math.pow(-0.0, 0.5) == +0.0

  // Catch -infinity inputs first.
  // TODO(jbramley): A constant infinity register would be helpful here.
  __ Fmov(double_scratch(), kFP64NegativeInfinity);
  __ Fcmp(double_scratch(), input);
  __ Fabs(result, input);
  __ B(&done, eq);

  // Add +0.0 to convert -0.0 to +0.0.
  __ Fadd(double_scratch(), input, fp_zero);
  __ Fsqrt(result, double_scratch());

  __ Bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  Register integer_exponent = MathPowIntegerDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(!exponent_type.IsInteger32() ||
         ToRegister(instr->right()).is(integer_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d0));
  DCHECK(ToDoubleRegister(instr->result()).is(d0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
    __ Bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
    // supports large integer exponents.
    __ Sxtw(integer_exponent, integer_exponent);
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister scratch_d = double_scratch();

  DCHECK(!AreAliased(input, result, scratch_d));

  Label done;

  __ Frinta(result, input);
  __ Fcmp(input, 0.0);
  __ Fccmp(result, input, ZFlag, lt);
  // The result is correct if the input was in [-0, +infinity], or was a
  // negative integral value.
  __ B(eq, &done);

  // Here the input is negative, non integral, with an exponent lower than 52.
  // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
  // case. So we can safely add 0.5.
  __ Fmov(scratch_d, 0.5);
  __ Fadd(result, input, scratch_d);
  __ Frintm(result, result);
  // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
  __ Fabs(result, result);
  __ Fneg(result, result);

  __ Bind(&done);
}


void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister temp = ToDoubleRegister(instr->temp1());
  DoubleRegister dot_five = double_scratch();
  Register result = ToRegister(instr->result());
  Label done;

  // Math.round() rounds to the nearest integer, with ties going towards
  // +infinity. This does not match any IEEE-754 rounding mode.
  //  - Infinities and NaNs are propagated unchanged, but cause deopts because
  //    they can't be represented as integers.
  //  - The sign of the result is the same as the sign of the input. This
  //    means that -0.0 rounds to itself, and values -0.5 <= input < 0 also
  //    produce a result of -0.0.
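  // Worked example (added for clarity): 0.49999999999999994 is the largest
  // double below 0.5, yet 0.49999999999999994 + 0.5 rounds up to exactly 1.0
  // in double arithmetic. The explicit "result is not 1" check below catches
  // this case so such inputs are not wrongly rounded up to 1.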

  // Add 0.5 and round towards -infinity.
  __ Fmov(dot_five, 0.5);
  __ Fadd(temp, input, dot_five);
  __ Fcvtms(result, temp);

  // The result is correct if:
  //  - result is not 0, as the input could be NaN or [-0.5, -0.0].
  //  - result is not 1, as 0.499...94 will wrongly map to 1.
  //  - result fits in 32 bits.
  __ Cmp(result, Operand(result.W(), SXTW));
  __ Ccmp(result, 1, ZFlag, eq);
  __ B(hi, &done);

  // At this point, we have to handle possible inputs of NaN or numbers in the
  // range [-0.5, 1.5[, or numbers larger than 32 bits.

  // Deoptimize if the result > 1, as it must be larger than 32 bits.
  __ Cmp(result, 1);
  DeoptimizeIf(hi, instr);

  // Deoptimize for negative inputs, which at this point are only numbers in
  // the range [-0.5, -0.0].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Fmov(result, input);
    DeoptimizeIfNegative(result, instr);
  }

  // Deoptimize if the input was NaN.
  __ Fcmp(input, dot_five);
  DeoptimizeIf(vs, instr);

  // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
  // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
  // else 0; we avoid dealing with 0.499...94 directly.
  __ Cset(result, ge);
  __ Bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fcvt(result.S(), input);
  __ Fcvt(result, result.S());
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fsqrt(result, input);
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  HMathMinMax::Operation op = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsInteger32()) {
    Register result = ToRegister32(instr->result());
    Register left = ToRegister32(instr->left());
    Operand right = ToOperand32(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else if (instr->hydrogen()->representation().IsSmi()) {
    Register result = ToRegister(instr->result());
    Register left = ToRegister(instr->left());
    Operand right = ToOperand(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    DoubleRegister result = ToDoubleRegister(instr->result());
    DoubleRegister left = ToDoubleRegister(instr->left());
    DoubleRegister right = ToDoubleRegister(instr->right());

    if (op == HMathMinMax::kMathMax) {
      __ Fmax(result, left, right);
    } else {
      DCHECK(op == HMathMinMax::kMathMin);
      __ Fmin(result, left, right);
    }
  }
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister32(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
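  // Worked example (added for clarity): for divisor == 8 or -8, mask == 7.
  // For dividend == -13: negate to 13, 13 & 7 == 5, negate back to -5, which
  // matches JavaScript's -13 % 8 == -5 (the remainder takes the dividend's
  // sign).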
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ Neg(dividend, dividend);
    __ And(dividend, dividend, mask);
    __ Negs(dividend, dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr);
    }
    __ B(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, mask);
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(dividend, result, temp));

  if (divisor == 0) {
    Deoptimize(instr);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Sxtw(dividend.X(), dividend);
  __ Mov(temp, Abs(divisor));
  __ Smsubl(result.X(), result, temp, dividend.X());

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Cbnz(result, &remainder_not_zero);
    DeoptimizeIfNegative(dividend, instr);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  Register dividend = ToRegister32(instr->left());
  Register divisor = ToRegister32(instr->right());
  Register result = ToRegister32(instr->result());

  Label done;
  // modulo = dividend - quotient * divisor
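  // Worked example (added for clarity): for -7 % 2, Sdiv truncates to -3 and
  // Msub computes -7 - (-3 * 2) = -1, so the remainder takes the sign of the
  // dividend, as JavaScript's % operator requires.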
  __ Sdiv(result, dividend, divisor);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr);
  }
  __ Msub(result, result, divisor, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cbnz(result, &done);
    DeoptimizeIfNegative(dividend, instr);
  }
  __ Bind(&done);
}


void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
  DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
  bool is_smi = instr->hydrogen()->representation().IsSmi();
  Register result =
      is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
  Register left =
      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
  int32_t right = ToInteger32(instr->right());
  DCHECK((right > -kMaxInt) && (right < kMaxInt));

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero) {
    if (right < 0) {
      // The result is -0 if right is negative and left is zero.
      DeoptimizeIfZero(left, instr);
    } else if (right == 0) {
      // The result is -0 if the right is zero and the left is negative.
      DeoptimizeIfNegative(left, instr);
    }
  }

  switch (right) {
    // Cases which can detect overflow.
    case -1:
      if (can_overflow) {
        // Only 0x80000000 can overflow here.
        __ Negs(result, left);
        DeoptimizeIf(vs, instr);
      } else {
        __ Neg(result, left);
      }
      break;
    case 0:
      // This case can never overflow.
      __ Mov(result, 0);
      break;
    case 1:
      // This case can never overflow.
      __ Mov(result, left, kDiscardForSameWReg);
      break;
    case 2:
      if (can_overflow) {
        __ Adds(result, left, left);
        DeoptimizeIf(vs, instr);
      } else {
        __ Add(result, left, left);
      }
      break;

    default:
      // Multiplication by constant powers of two (and some related values)
      // can be done efficiently with shifted operands.
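      // Worked example (added for clarity): x * 5 becomes x + (x << 2),
      // x * 7 becomes -(x - (x << 3)) = (x << 3) - x, and x * -4 becomes
      // -(x << 2); each needs only one or two shifted-operand instructions
      // on ARM64.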
      int32_t right_abs = Abs(right);

      if (base::bits::IsPowerOfTwo32(right_abs)) {
        int right_log2 = WhichPowerOf2(right_abs);

        if (can_overflow) {
          Register scratch = result;
          DCHECK(!AreAliased(scratch, left));
          __ Cls(scratch, left);
          __ Cmp(scratch, right_log2);
          DeoptimizeIf(lt, instr);
        }

        if (right >= 0) {
          // result = left << log2(right)
          __ Lsl(result, left, right_log2);
        } else {
          // result = -left << log2(-right)
          if (can_overflow) {
            __ Negs(result, Operand(left, LSL, right_log2));
            DeoptimizeIf(vs, instr);
          } else {
            __ Neg(result, Operand(left, LSL, right_log2));
          }
        }
        return;
      }

      // For the following cases, we could perform a conservative overflow
      // check with CLS as above. However the few cycles saved are likely not
      // worth the risk of deoptimizing more often than required.
      DCHECK(!can_overflow);

      if (right >= 0) {
        if (base::bits::IsPowerOfTwo32(right - 1)) {
          // result = left + left << log2(right - 1)
          __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
        } else if (base::bits::IsPowerOfTwo32(right + 1)) {
          // result = -left + left << log2(right + 1)
          __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
          __ Neg(result, result);
        } else {
          UNREACHABLE();
        }
      } else {
        if (base::bits::IsPowerOfTwo32(-right + 1)) {
          // result = left - left << log2(-right + 1)
          __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
        } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
          // result = -left - left << log2(-right - 1)
          __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
          __ Neg(result, result);
        } else {
          UNREACHABLE();
        }
      }
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Register right = ToRegister32(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr);
  }

  if (can_overflow) {
    __ Smull(result.X(), left, right);
    __ Cmp(result.X(), Operand(result, SXTW));
    DeoptimizeIf(ne, instr);
  } else {
    __ Mul(result, left, right);
  }
}


void LCodeGen::DoMulS(LMulS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr);
  }

  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
  if (can_overflow) {
    __ Smulh(result, left, right);
    __ Cmp(result, Operand(result.W(), SXTW));
    __ SmiTag(result);
    DeoptimizeIf(ne, instr);
  } else {
    if (AreAliased(result, left, right)) {
      // All three registers are the same: half untag the input and then
      // multiply, giving a tagged result.
      STATIC_ASSERT((kSmiShift % 2) == 0);
      __ Asr(result, left, kSmiShift / 2);
      __ Mul(result, result, result);
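      // Worked example (added for clarity): the smi encoding of v is v << 32,
      // so the Asr by kSmiShift / 2 == 16 yields v << 16, and
      // (v << 16) * (v << 16) == (v * v) << 32, which is exactly the smi
      // encoding of v * v; no further tagging step is needed.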
4504 } else if (result.Is(left) && !left.Is(right)) {
4505 // Registers result and left alias, right is distinct: untag left into
4506 // result, and then multiply by right, giving a tagged result.
4507 __ SmiUntag(result, left);
4508 __ Mul(result, result, right);
4509 } else {
4510 DCHECK(!left.Is(result));
4511 // Registers result and right alias, left is distinct, or all registers
4512 // are distinct: untag right into result, and then multiply by left,
4513 // giving a tagged result.
4514 __ SmiUntag(result, right);
4515 __ Mul(result, left, result);
4516 }
4517 }
4518 }
4519
4520
DoDeferredNumberTagD(LNumberTagD * instr)4521 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4522 // TODO(3095996): Get rid of this. For now, we need to make the
4523 // result register contain a valid pointer because it is already
4524 // contained in the register pointer map.
4525 Register result = ToRegister(instr->result());
4526 __ Mov(result, 0);
4527
4528 PushSafepointRegistersScope scope(this);
4529 // NumberTagU and NumberTagD use the context from the frame, rather than
4530 // the environment's HContext or HInlinedContext value.
4531 // They only call Runtime::kAllocateHeapNumber.
4532 // The corresponding HChange instructions are added in a phase that does
4533 // not have easy access to the local context.
4534 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4535 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4536 RecordSafepointWithRegisters(
4537 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4538 __ StoreToSafepointRegisterSlot(x0, result);
4539 }
4540
4541
DoNumberTagD(LNumberTagD * instr)4542 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4543 class DeferredNumberTagD: public LDeferredCode {
4544 public:
4545 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4546 : LDeferredCode(codegen), instr_(instr) { }
4547 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4548 virtual LInstruction* instr() { return instr_; }
4549 private:
4550 LNumberTagD* instr_;
4551 };
4552
4553 DoubleRegister input = ToDoubleRegister(instr->value());
4554 Register result = ToRegister(instr->result());
4555 Register temp1 = ToRegister(instr->temp1());
4556 Register temp2 = ToRegister(instr->temp2());
4557
4558 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4559 if (FLAG_inline_new) {
4560 __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
4561 } else {
4562 __ B(deferred->entry());
4563 }
4564
4565 __ Bind(deferred->exit());
4566 __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
4567 }


void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
                                    LOperand* value,
                                    LOperand* temp1,
                                    LOperand* temp2) {
  Label slow, convert_and_store;
  Register src = ToRegister32(value);
  Register dst = ToRegister(instr->result());
  Register scratch1 = ToRegister(temp1);

  if (FLAG_inline_new) {
    Register scratch2 = ToRegister(temp2);
    __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
    __ B(&convert_and_store);
  }

  // Slow case: call the runtime system to do the number allocation.
  __ Bind(&slow);
  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ Mov(dst, 0);
  {
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagU and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, dst);
  }

  // Convert number to floating point and store in the newly allocated heap
  // number.
  __ Bind(&convert_and_store);
  DoubleRegister dbl_scratch = double_scratch();
  __ Ucvtf(dbl_scratch, src);
  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}
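
// Note: Ucvtf performs an *unsigned* integer-to-double conversion, which is
// what makes this path correct for uint32 inputs; e.g. src == 0xffffffff
// converts to 4294967295.0 rather than -1.0.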


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagU(instr_,
                                      instr_->value(),
                                      instr_->temp1(),
                                      instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register value = ToRegister32(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Cmp(value, Smi::kMaxValue);
  __ B(hi, deferred->entry());
  __ SmiTag(result, value.X());
  __ Bind(deferred->exit());
}
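
// Note: the comparison above is unsigned ('hi'), so any uint32 value greater
// than Smi::kMaxValue (2^31 - 1 with 32-bit smis) takes the deferred path
// and is boxed as a heap number instead of being smi-tagged.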


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  DoubleRegister result = ToDoubleRegister(instr->result());
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();

  Label done, load_smi;

  // Work out what untag mode we're working with.
  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    __ JumpIfSmi(input, &load_smi);

    Label convert_undefined;

    // Heap number map check.
    if (can_convert_undefined_to_nan) {
      __ JumpIfNotHeapNumber(input, &convert_undefined);
    } else {
      DeoptimizeIfNotHeapNumber(input, instr);
    }

    // Load heap number.
    __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
    if (instr->hydrogen()->deoptimize_on_minus_zero()) {
      DeoptimizeIfMinusZero(result, instr);
    }
    __ B(&done);

    if (can_convert_undefined_to_nan) {
      __ Bind(&convert_undefined);
      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);

      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ B(&done);
    }

  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
    // Fall through to load_smi.
  }

  // Smi to double register conversion.
  __ Bind(&load_smi);
  __ SmiUntagToDouble(result, input);

  __ Bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
  __ PushPreamble(instr->argc(), kPointerSize);
}


void LCodeGen::DoPushArguments(LPushArguments* instr) {
  MacroAssembler::PushPopQueue args(masm());

  for (int i = 0; i < instr->ArgumentCount(); ++i) {
    LOperand* arg = instr->argument(i);
    if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
      Abort(kDoPushArgumentNotImplementedForDoubleType);
      return;
    }
    args.Queue(ToRegister(arg));
  }

  // The preamble was done by LPreparePushArguments.
  args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);

  after_push_argument_ = true;
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in x0. We're leaving the code
    // managed by the register allocator and tearing down the frame, so it's
    // safe to write to the context register.
    __ Push(x0);
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }

  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }

  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    Register stack_pointer = masm()->StackPointer();
    __ Mov(stack_pointer, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(fp, lr);
  }

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Drop(parameter_count + 1);
  } else {
    Register parameter_count = ToRegister(instr->parameter_count());
    __ DropBySMI(parameter_count);
  }
  __ Ret();

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           Register temp,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }

  __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
  if (encoding == String::ONE_BYTE_ENCODING) {
    return MemOperand(temp, ToRegister32(index), SXTW);
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    return MemOperand(temp, ToRegister32(index), SXTW, 1);
  }
}
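
// Note: for a register index the operand computed above is
//   [temp + (SignExtend32To64(index) << log2(char size))],
// where temp already points at the untagged start of the character data.
// The two-byte case uses an extend shift of 1 so each index steps kUC16Size
// bytes; the one-byte case uses no shift.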


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    // Even though this lithium instruction comes with a temp register, we
    // can't use it here because we want to use "AtStart" constraints on the
    // inputs and the debug code here needs a scratch register.
    UseScratchRegisterScope temps(masm());
    Register dbg_temp = temps.AcquireX();

    __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
    __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));

    __ And(dbg_temp, dbg_temp,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
                             ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Ldrb(result, operand);
  } else {
    __ Ldrh(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    DCHECK(ToRegister(instr->context()).is(cp));
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
                                 encoding_mask);
  }
  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Strb(value, operand);
  } else {
    __ Strh(value, operand);
  }
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIfNegative(input.W(), instr);
  }
  __ SmiTag(output, input);
}
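
// Worked example of the arm64 smi representation used throughout this file:
// the 32-bit payload lives in the upper word of the X register and the low
// 32 bits are zero (kSmiShift == 32, kSmiTag == 0), so
//   SmiTag:    5 -> 0x0000000500000000  (Lsl #32)
//   SmiUntag:  0x0000000500000000 -> 5  (Asr #32)
// A uint32 input with its top bit set would sign-extend to a negative smi,
// which is why the kUint32 case above deoptimizes on a negative W register.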


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done, untag;

  if (instr->needs_check()) {
    DeoptimizeIfNotSmi(input, instr);
  }

  __ Bind(&untag);
  __ SmiUntag(result, input);
  __ Bind(&done);
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister32(instr->left());
  Register result = ToRegister32(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister32(instr->right());
    switch (instr->op()) {
      case Token::ROR: __ Ror(result, left, right); break;
      case Token::SAR: __ Asr(result, left, right); break;
      case Token::SHL: __ Lsl(result, left, right); break;
      case Token::SHR:
        __ Lsr(result, left, right);
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not
          // representable in a signed 32-bit smi.
          DeoptimizeIfNegative(result, instr);
        }
        break;
      default: UNREACHABLE();
    }
  } else {
    DCHECK(right_op->IsConstantOperand());
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr);
      }
      __ Mov(result, left, kDiscardForSameWReg);
    } else {
      switch (instr->op()) {
        case Token::ROR: __ Ror(result, left, shift_count); break;
        case Token::SAR: __ Asr(result, left, shift_count); break;
        case Token::SHL: __ Lsl(result, left, shift_count); break;
        case Token::SHR: __ Lsr(result, left, shift_count); break;
        default: UNREACHABLE();
      }
    }
  }
}
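
// Note on the SHR deopt above: JavaScript's >>> produces an unsigned result,
// but when that result must feed an int32/smi it has to fit in 31 value
// bits. For example, with a shift count of 0, (-2) >>> 0 == 0xfffffffe ==
// 4294967294, which does not fit, so a negative input deoptimizes. Any
// nonzero shift count already clears the sign bit, so no check is needed in
// the constant-shift case.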


void LCodeGen::DoShiftS(LShiftS* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister(instr->right());

    // JavaScript shifts only look at the bottom 5 bits of the 'right'
    // operand. Since we're handling smis in X registers, we have to extract
    // these bits explicitly.
    __ Ubfx(result, right, kSmiShift, 5);

    switch (instr->op()) {
      case Token::ROR: {
        // This is the only case that needs a scratch register. To keep
        // things simple for the other cases, borrow a MacroAssembler scratch
        // register.
        UseScratchRegisterScope temps(masm());
        Register temp = temps.AcquireW();
        __ SmiUntag(temp, left);
        __ Ror(result.W(), temp.W(), result.W());
        __ SmiTag(result);
        break;
      }
      case Token::SAR:
        __ Asr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      case Token::SHL:
        __ Lsl(result, left, result);
        break;
      case Token::SHR:
        __ Lsr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not
          // representable in a signed 32-bit smi.
          DeoptimizeIfNegative(result, instr);
        }
        break;
      default: UNREACHABLE();
    }
  } else {
    DCHECK(right_op->IsConstantOperand());
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr);
      }
      __ Mov(result, left);
    } else {
      switch (instr->op()) {
        case Token::ROR:
          __ SmiUntag(result, left);
          __ Ror(result.W(), result.W(), shift_count);
          __ SmiTag(result);
          break;
        case Token::SAR:
          __ Asr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        case Token::SHL:
          __ Lsl(result, left, shift_count);
          break;
        case Token::SHR:
          __ Lsr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        default: UNREACHABLE();
      }
    }
  }
}
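
// Worked example of the tagged SAR sequence above, operating directly on
// 64-bit tagged values instead of untagging and retagging. For
// left = smi(-7) = 0xfffffff900000000 and a shift of 1:
//   Asr #1               -> 0xfffffffc80000000   (payload shifted, but a
//                                                 stray bit fell into the
//                                                 low word)
//   Bic kSmiShiftMask    -> 0xfffffffc00000000   (a valid smi(-4))
// which matches JavaScript's -7 >> 1 == -4. SHR works the same way with a
// logical shift.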


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ Debug("LDebugBreak", 0, BREAK);
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Register scratch1 = x5;
  Register scratch2 = x6;
  DCHECK(instr->IsMarkedAsCall());

  ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
  // TODO(all): if Mov could handle objects in new space then it could be
  // used here.
  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
  __ Push(cp, scratch1, scratch2);  // The context is the first argument.
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(hs, &done);

    PredictableCodeSizeScope predictable(masm_,
                                         Assembler::kCallSizeWithRelocation);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ Bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(lo, deferred_stack_check->entry());

    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ Bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  Register temp = ToRegister(instr->temp());
  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ Ldr(scratch, target);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr);
    } else {
      __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex,
                       &skip_assignment);
    }
  }

  __ Str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetLinkRegisterState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }
  __ Bind(&skip_assignment);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = ToRegister(instr->temp1());

  // Load the cell.
  __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register payload = ToRegister(instr->temp2());
    __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
    DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr);
  }

  // Store the value.
  __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    DCHECK(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
    scratch = ToRegister(instr->temp());
  }

  MemOperand dst =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->base_offset());

  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
      (elements_kind == FLOAT32_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    DoubleRegister dbl_scratch = double_scratch();
    __ Fcvt(dbl_scratch.S(), value);
    __ Str(dbl_scratch.S(), dst);
  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
             (elements_kind == FLOAT64_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ Str(value, dst);
  } else {
    Register value = ToRegister(instr->value());

    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ Strb(value, dst);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Strh(value, dst);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Str(value.W(), dst);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister value = ToDoubleRegister(instr->value());
  MemOperand mem_op;

  if (instr->key()->IsConstantOperand()) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    int offset = instr->base_offset() + constant_key * kDoubleSize;
    mem_op = MemOperand(elements, offset);
  } else {
    Register store_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    mem_op = PrepareKeyedArrayOperand(store_base, elements, key,
                                      key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      instr->hydrogen()->representation(),
                                      instr->base_offset());
  }

  if (instr->NeedsCanonicalization()) {
    __ CanonicalizeNaN(double_scratch(), value);
    __ Str(double_scratch(), mem_op);
  } else {
    __ Str(value, mem_op);
  }
}
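
// Note on the canonicalization above: holes in a FixedDoubleArray are
// represented by a reserved NaN bit pattern, so an arbitrary NaN (whose
// payload bits could be anything) must be rewritten to the canonical NaN
// before being stored; otherwise a stored NaN could later be misread as the
// hole.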


void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = no_reg;
  Register store_base = no_reg;
  Register key = no_reg;
  MemOperand mem_op;

  if (!instr->key()->IsConstantOperand() ||
      instr->hydrogen()->NeedsWriteBarrier()) {
    scratch = ToRegister(instr->temp());
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset = instr->base_offset() +
        ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
    if (representation.IsInteger32()) {
      DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
      STATIC_ASSERT(kSmiTag == 0);
      mem_op = UntagSmiMemOperand(store_base, offset);
    } else {
      mem_op = MemOperand(store_base, offset);
    }
  } else {
    store_base = scratch;
    key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

    mem_op = PrepareKeyedArrayOperand(store_base, elements, key,
                                      key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      representation, instr->base_offset());
  }

  __ Store(value, mem_op, representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    DCHECK(representation.IsTagged());
    // This assignment may cause element_addr to alias store_base.
    Register element_addr = scratch;
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the modified element into the scratch register.
    __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
    __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
                   kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}
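
// Note on UntagSmiMemOperand above: a smi slot holds its payload in the
// upper 32 bits of the tagged word and zeros in the lower 32 bits, so on
// little-endian arm64 addressing the upper half of the slot lets a plain
// 32-bit store of the int32 value update just the payload. The slot stays a
// valid smi because the entry was already initialized as one
// (STORE_TO_INITIALIZED_ENTRY) and its low word is already zero.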


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  Handle<Code> ic =
      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    __ Store(value, MemOperand(object, offset), representation);
    return;
  }

  __ AssertNotSmi(object);

  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
    return;
  }

  Register value = ToRegister(instr->value());

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsInteger32Constant(LConstantOperand::cast(instr->value())));

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    // Store the new map value.
    Register new_map_value = ToRegister(instr->temp0());
    __ Mov(new_map_value, Operand(transition));
    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           new_map_value,
                           ToRegister(instr->temp1()),
                           GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination;
  if (access.IsInobject()) {
    destination = object;
  } else {
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
    destination = temp0;
  }

  if (representation.IsSmi() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(destination, offset));
    __ AssertSmi(temp0);
    // If destination aliased temp0, restore it to the address calculated
    // earlier.
    if (destination.Is(temp0)) {
      DCHECK(!access.IsInobject());
      __ Ldr(destination,
             FieldMemOperand(object, JSObject::kPropertiesOffset));
    }
#endif
    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
    STATIC_ASSERT(kSmiTag == 0);
    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
             Representation::Integer32());
  } else {
    __ Store(value, FieldMemOperand(destination, offset), representation);
  }
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    __ RecordWriteField(destination,
                        offset,
                        value,                       // Clobbered.
                        ToRegister(instr->temp1()),  // Clobbered.
                        GetLinkRegisterState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTagAndPush(index);

  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());

  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}
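
// Note: the fast path above indexes the single-character string cache (a
// FixedArray keyed by char code). An undefined entry means the string has
// not been cached yet, so the deferred path calls the runtime to create it.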


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ SmiTagAndPush(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  InlineSmiCheckInfo::EmitNotInlined(masm());

  Condition condition = TokenToCondition(op, false);

  EmitCompareAndBranch(instr, condition, x0, 0);
}


void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to the boolean and undefined checks.
    __ JumpIfNotHeapNumber(input, &check_bools);

    // A heap number: load the value and convert to int32 using a truncating
    // conversion.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);

    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output already contains zero; undefined converts to zero for
    // truncating conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
  } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    DeoptimizeIfNotHeapNumber(input, instr);

    // A heap number: load the value and convert to int32 using a
    // non-truncating conversion. If the result is out of range, branch to
    // deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr, "lost precision or NaN");

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr, "minus zero");
    }
  }
  __ Bind(&done);
}
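
// Worked example of the Cset/Ccmp pair above, which converts true/false to
// 1/0 without branching:
//   Cmp(input, true_root); Cset(output, eq)  -> output = (input == true)
//   Ccmp(input, false_root, ZFlag, ne)       -> if input != true, compare
//                                               against false; otherwise
//                                               force the Z flag set.
// Afterwards 'eq' holds iff input was true or false, and output already
// holds the matching integer (1 for true, 0 for false).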


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(),
                                     instr_->temp1(), instr_->temp2());
    }

    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).Is(x0));
  DCHECK(ToRegister(instr->result()).Is(x0));
  __ Push(x0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create the regexp literal using the runtime function.
  // The result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

  __ Bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);

  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen
      // checks that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
    DCHECK(object.is(x0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind,
                                    is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr);
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ Push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(value, true_label);

    int true_block = instr->TrueDestination(chunk_);
    int false_block = instr->FalseDestination(chunk_);
    int next_block = GetNextEmittedBlock();

    if (true_block == false_block) {
      EmitGoto(true_block);
    } else if (true_block == next_block) {
      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
    } else {
      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
      if (false_block != next_block) {
        __ B(chunk_->GetAssemblyLabel(false_block));
      }
    }

  } else if (String::Equals(type_name, factory->string_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(
        value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this
    // case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    DCHECK(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // The HeapObject's type has been loaded into the type register by
    // JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (String::Equals(type_name, factory->object_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object
  // as a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr);
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr);

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_,
                                             index_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));

  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}
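
// Note on the index encoding above: the incoming smi packs a flag into bit 0
// of its payload (tested via the corresponding bit of the tagged value).
// When set, the field is a mutable double and is loaded through the deferred
// runtime call; the ASR by 1 then halves the payload while keeping the value
// a smi. A non-negative index selects an in-object field; a negative index
// selects a slot in the out-of-object properties array.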


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


} }  // namespace v8::internal