1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/crankshaft/ppc/lithium-codegen-ppc.h"
6
7 #include "src/base/bits.h"
8 #include "src/code-factory.h"
9 #include "src/code-stubs.h"
10 #include "src/crankshaft/hydrogen-osr.h"
11 #include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
12 #include "src/ic/ic.h"
13 #include "src/ic/stub-cache.h"
14
15 namespace v8 {
16 namespace internal {
17
18
19 class SafepointGenerator final : public CallWrapper {
20 public:
21 SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
22 Safepoint::DeoptMode mode)
23 : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
24 virtual ~SafepointGenerator() {}
25
26 void BeforeCall(int call_size) const override {}
27
28 void AfterCall() const override {
29 codegen_->RecordSafepoint(pointers_, deopt_mode_);
30 }
31
32 private:
33 LCodeGen* codegen_;
34 LPointerMap* pointers_;
35 Safepoint::DeoptMode deopt_mode_;
36 };
37
38 LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
39 LCodeGen* codegen)
40 : codegen_(codegen) {
41 DCHECK(codegen_->info()->is_calling());
42 DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
43 codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
44 StoreRegistersStateStub stub(codegen_->isolate());
45 codegen_->masm_->CallStub(&stub);
46 }
47
48 LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
49 DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
50 RestoreRegistersStateStub stub(codegen_->isolate());
51 codegen_->masm_->CallStub(&stub);
52 codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
53 }
54
55 #define __ masm()->
56
57 bool LCodeGen::GenerateCode() {
58 LPhase phase("Z_Code generation", chunk());
59 DCHECK(is_unused());
60 status_ = GENERATING;
61
62 // Open a frame scope to indicate that there is a frame on the stack. The
63 // NONE indicates that the scope shouldn't actually generate code to set up
64 // the frame (that is done in GeneratePrologue).
65 FrameScope frame_scope(masm_, StackFrame::NONE);
66
67 bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
68 GenerateJumpTable() && GenerateSafepointTable();
69 if (FLAG_enable_embedded_constant_pool && !rc) {
70 masm()->AbortConstantPoolBuilding();
71 }
72 return rc;
73 }
74
75
76 void LCodeGen::FinishCode(Handle<Code> code) {
77 DCHECK(is_done());
78 code->set_stack_slots(GetTotalFrameSlotCount());
79 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
80 PopulateDeoptimizationData(code);
81 }
82
83
84 void LCodeGen::SaveCallerDoubles() {
85 DCHECK(info()->saves_caller_doubles());
86 DCHECK(NeedsEagerFrame());
87 Comment(";;; Save clobbered callee double registers");
88 int count = 0;
89 BitVector* doubles = chunk()->allocated_double_registers();
90 BitVector::Iterator save_iterator(doubles);
91 while (!save_iterator.Done()) {
92 __ stfd(DoubleRegister::from_code(save_iterator.Current()),
93 MemOperand(sp, count * kDoubleSize));
94 save_iterator.Advance();
95 count++;
96 }
97 }
98
99
100 void LCodeGen::RestoreCallerDoubles() {
101 DCHECK(info()->saves_caller_doubles());
102 DCHECK(NeedsEagerFrame());
103 Comment(";;; Restore clobbered callee double registers");
104 BitVector* doubles = chunk()->allocated_double_registers();
105 BitVector::Iterator save_iterator(doubles);
106 int count = 0;
107 while (!save_iterator.Done()) {
108 __ lfd(DoubleRegister::from_code(save_iterator.Current()),
109 MemOperand(sp, count * kDoubleSize));
110 save_iterator.Advance();
111 count++;
112 }
113 }
114
115
116 bool LCodeGen::GeneratePrologue() {
117 DCHECK(is_generating());
118
119 if (info()->IsOptimizing()) {
120 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
121
122 // r4: Callee's JS function.
123 // cp: Callee's context.
124 // pp: Callee's constant pool pointer (if enabled)
125 // fp: Caller's frame pointer.
126 // lr: Caller's pc.
127 // ip: Our own function entry (required by the prologue)
128 }
129
130 int prologue_offset = masm_->pc_offset();
131
132 if (prologue_offset) {
133 // Prologue logic requires its starting address in ip and the
134 // corresponding offset from the function entry.
135 prologue_offset += Instruction::kInstrSize;
136 __ addi(ip, ip, Operand(prologue_offset));
137 }
138 info()->set_prologue_offset(prologue_offset);
139 if (NeedsEagerFrame()) {
140 if (info()->IsStub()) {
141 __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
142 } else {
143 __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
144 }
145 frame_is_built_ = true;
146 }
147
148 // Reserve space for the stack slots needed by the code.
149 int slots = GetStackSlotCount();
150 if (slots > 0) {
151 __ subi(sp, sp, Operand(slots * kPointerSize));
152 if (FLAG_debug_code) {
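// In debug builds, fill the freshly reserved stack slots with the
// kSlotsZapValue pattern so reads of uninitialized slots are easier to spot.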
153 __ Push(r3, r4);
154 __ li(r0, Operand(slots));
155 __ mtctr(r0);
156 __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
157 __ mov(r4, Operand(kSlotsZapValue));
158 Label loop;
159 __ bind(&loop);
160 __ StorePU(r4, MemOperand(r3, -kPointerSize));
161 __ bdnz(&loop);
162 __ Pop(r3, r4);
163 }
164 }
165
166 if (info()->saves_caller_doubles()) {
167 SaveCallerDoubles();
168 }
169 return !is_aborted();
170 }
171
172
173 void LCodeGen::DoPrologue(LPrologue* instr) {
174 Comment(";;; Prologue begin");
175
176 // Possibly allocate a local context.
177 if (info()->scope()->NeedsContext()) {
178 Comment(";;; Allocate local context");
179 bool need_write_barrier = true;
180 // Argument to NewContext is the function, which is in r4.
181 int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
182 Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
183 if (info()->scope()->is_script_scope()) {
184 __ push(r4);
185 __ Push(info()->scope()->scope_info());
186 __ CallRuntime(Runtime::kNewScriptContext);
187 deopt_mode = Safepoint::kLazyDeopt;
188 } else {
189 if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
190 FastNewFunctionContextStub stub(isolate());
191 __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
192 Operand(slots));
193 __ CallStub(&stub);
194 // Result of FastNewFunctionContextStub is always in new space.
195 need_write_barrier = false;
196 } else {
197 __ push(r4);
198 __ CallRuntime(Runtime::kNewFunctionContext);
199 }
200 }
201 RecordSafepoint(deopt_mode);
202
203 // Context is returned in both r3 and cp. It replaces the context
204 // passed to us. It's saved in the stack and kept live in cp.
205 __ mr(cp, r3);
206 __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
207 // Copy any necessary parameters into the context.
208 int num_parameters = info()->scope()->num_parameters();
209 int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
210 for (int i = first_parameter; i < num_parameters; i++) {
211 Variable* var = (i == -1) ? info()->scope()->receiver()
212 : info()->scope()->parameter(i);
213 if (var->IsContextSlot()) {
214 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
215 (num_parameters - 1 - i) * kPointerSize;
216 // Load parameter from stack.
217 __ LoadP(r3, MemOperand(fp, parameter_offset));
218 // Store it in the context.
219 MemOperand target = ContextMemOperand(cp, var->index());
220 __ StoreP(r3, target, r0);
221 // Update the write barrier. This clobbers r6 and r3.
222 if (need_write_barrier) {
223 __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
224 GetLinkRegisterState(), kSaveFPRegs);
225 } else if (FLAG_debug_code) {
226 Label done;
227 __ JumpIfInNewSpace(cp, r3, &done);
228 __ Abort(kExpectedNewSpaceObject);
229 __ bind(&done);
230 }
231 }
232 }
233 Comment(";;; End allocate local context");
234 }
235
236 Comment(";;; Prologue end");
237 }
238
239
240 void LCodeGen::GenerateOsrPrologue() {
241 // Generate the OSR entry prologue at the first unknown OSR value, or if there
242 // are none, at the OSR entrypoint instruction.
243 if (osr_pc_offset_ >= 0) return;
244
245 osr_pc_offset_ = masm()->pc_offset();
246
247 // Adjust the frame size, subsuming the unoptimized frame into the
248 // optimized frame.
249 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
250 DCHECK(slots >= 0);
251 __ subi(sp, sp, Operand(slots * kPointerSize));
252 }
253
254
255 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
256 if (instr->IsCall()) {
257 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
258 }
259 if (!instr->IsLazyBailout() && !instr->IsGap()) {
260 safepoints_.BumpLastLazySafepointIndex();
261 }
262 }
263
264
265 bool LCodeGen::GenerateDeferredCode() {
266 DCHECK(is_generating());
267 if (deferred_.length() > 0) {
268 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
269 LDeferredCode* code = deferred_[i];
270
271 HValue* value =
272 instructions_->at(code->instruction_index())->hydrogen_value();
273 RecordAndWritePosition(value->position());
274
275 Comment(
276 ";;; <@%d,#%d> "
277 "-------------------- Deferred %s --------------------",
278 code->instruction_index(), code->instr()->hydrogen_value()->id(),
279 code->instr()->Mnemonic());
280 __ bind(code->entry());
281 if (NeedsDeferredFrame()) {
282 Comment(";;; Build frame");
283 DCHECK(!frame_is_built_);
284 DCHECK(info()->IsStub());
285 frame_is_built_ = true;
286 __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
287 __ PushCommonFrame(scratch0());
288 Comment(";;; Deferred code");
289 }
290 code->Generate();
291 if (NeedsDeferredFrame()) {
292 Comment(";;; Destroy frame");
293 DCHECK(frame_is_built_);
294 __ PopCommonFrame(scratch0());
295 frame_is_built_ = false;
296 }
297 __ b(code->exit());
298 }
299 }
300
301 return !is_aborted();
302 }
303
304
305 bool LCodeGen::GenerateJumpTable() {
306 // Check that the jump table is accessible from everywhere in the function
307 // code, i.e. that offsets to the table can be encoded in the 24-bit signed
308 // immediate of a branch instruction.
309 // To simplify, we consider the code size from the first instruction to the
310 // end of the jump table. We also don't consider the pc load delta.
311 // Each jump table entry is budgeted at 7 instructions (hence the
312 // jump_table_.length() * 7 term below).
313 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
314 jump_table_.length() * 7)) {
315 Abort(kGeneratedCodeIsTooLarge);
316 }
317
318 if (jump_table_.length() > 0) {
319 Label needs_frame, call_deopt_entry;
320
321 Comment(";;; -------------------- Jump table --------------------");
322 Address base = jump_table_[0].address;
323
324 Register entry_offset = scratch0();
325
326 int length = jump_table_.length();
327 for (int i = 0; i < length; i++) {
328 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
329 __ bind(&table_entry->label);
330
331 DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
332 Address entry = table_entry->address;
333 DeoptComment(table_entry->deopt_info);
334
335 // Second-level deopt table entries are contiguous and small, so instead
336 // of loading the full, absolute address of each one, load an immediate
337 // offset which will be added to the base address later.
338 __ mov(entry_offset, Operand(entry - base));
339
340 if (table_entry->needs_frame) {
341 DCHECK(!info()->saves_caller_doubles());
342 Comment(";;; call deopt with frame");
343 __ PushCommonFrame();
344 __ b(&needs_frame, SetLK);
345 } else {
346 __ b(&call_deopt_entry, SetLK);
347 }
348 }
349
350 if (needs_frame.is_linked()) {
351 __ bind(&needs_frame);
352 // This variant of deopt can only be used with stubs. Since we don't
353 // have a function pointer to install in the stack frame that we're
354 // building, install a special marker there instead.
355 __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
356 __ push(ip);
357 DCHECK(info()->IsStub());
358 }
359
360 Comment(";;; call deopt");
361 __ bind(&call_deopt_entry);
362
363 if (info()->saves_caller_doubles()) {
364 DCHECK(info()->IsStub());
365 RestoreCallerDoubles();
366 }
367
368 // Add the base address to the offset previously loaded in entry_offset.
369 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
370 __ add(ip, entry_offset, ip);
371 __ Jump(ip);
372 }
373
374 // The deoptimization jump table is the last part of the instruction
375 // sequence. Mark the generated code as done unless we bailed out.
376 if (!is_aborted()) status_ = DONE;
377 return !is_aborted();
378 }
379
380
381 bool LCodeGen::GenerateSafepointTable() {
382 DCHECK(is_done());
383 safepoints_.Emit(masm(), GetTotalFrameSlotCount());
384 return !is_aborted();
385 }
386
387
388 Register LCodeGen::ToRegister(int code) const {
389 return Register::from_code(code);
390 }
391
392
393 DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
394 return DoubleRegister::from_code(code);
395 }
396
397
398 Register LCodeGen::ToRegister(LOperand* op) const {
399 DCHECK(op->IsRegister());
400 return ToRegister(op->index());
401 }
402
403
404 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
405 if (op->IsRegister()) {
406 return ToRegister(op->index());
407 } else if (op->IsConstantOperand()) {
408 LConstantOperand* const_op = LConstantOperand::cast(op);
409 HConstant* constant = chunk_->LookupConstant(const_op);
410 Handle<Object> literal = constant->handle(isolate());
411 Representation r = chunk_->LookupLiteralRepresentation(const_op);
412 if (r.IsInteger32()) {
413 AllowDeferredHandleDereference get_number;
414 DCHECK(literal->IsNumber());
415 __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
416 } else if (r.IsDouble()) {
417 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
418 } else {
419 DCHECK(r.IsSmiOrTagged());
420 __ Move(scratch, literal);
421 }
422 return scratch;
423 } else if (op->IsStackSlot()) {
424 __ LoadP(scratch, ToMemOperand(op));
425 return scratch;
426 }
427 UNREACHABLE();
428 return scratch;
429 }
430
431
432 void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
433 Register dst) {
434 DCHECK(IsInteger32(const_op));
435 HConstant* constant = chunk_->LookupConstant(const_op);
436 int32_t value = constant->Integer32Value();
437 if (IsSmi(const_op)) {
438 __ LoadSmiLiteral(dst, Smi::FromInt(value));
439 } else {
440 __ LoadIntLiteral(dst, value);
441 }
442 }
443
444
445 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
446 DCHECK(op->IsDoubleRegister());
447 return ToDoubleRegister(op->index());
448 }
449
450
451 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
452 HConstant* constant = chunk_->LookupConstant(op);
453 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
454 return constant->handle(isolate());
455 }
456
457
458 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
459 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
460 }
461
462
463 bool LCodeGen::IsSmi(LConstantOperand* op) const {
464 return chunk_->LookupLiteralRepresentation(op).IsSmi();
465 }
466
467
468 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
469 return ToRepresentation(op, Representation::Integer32());
470 }
471
472
473 intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
474 const Representation& r) const {
475 HConstant* constant = chunk_->LookupConstant(op);
476 int32_t value = constant->Integer32Value();
477 if (r.IsInteger32()) return value;
478 DCHECK(r.IsSmiOrTagged());
479 return reinterpret_cast<intptr_t>(Smi::FromInt(value));
480 }
481
482
483 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
484 HConstant* constant = chunk_->LookupConstant(op);
485 return Smi::FromInt(constant->Integer32Value());
486 }
487
488
489 double LCodeGen::ToDouble(LConstantOperand* op) const {
490 HConstant* constant = chunk_->LookupConstant(op);
491 DCHECK(constant->HasDoubleValue());
492 return constant->DoubleValue();
493 }
494
495
496 Operand LCodeGen::ToOperand(LOperand* op) {
497 if (op->IsConstantOperand()) {
498 LConstantOperand* const_op = LConstantOperand::cast(op);
499 HConstant* constant = chunk()->LookupConstant(const_op);
500 Representation r = chunk_->LookupLiteralRepresentation(const_op);
501 if (r.IsSmi()) {
502 DCHECK(constant->HasSmiValue());
503 return Operand(Smi::FromInt(constant->Integer32Value()));
504 } else if (r.IsInteger32()) {
505 DCHECK(constant->HasInteger32Value());
506 return Operand(constant->Integer32Value());
507 } else if (r.IsDouble()) {
508 Abort(kToOperandUnsupportedDoubleImmediate);
509 }
510 DCHECK(r.IsTagged());
511 return Operand(constant->handle(isolate()));
512 } else if (op->IsRegister()) {
513 return Operand(ToRegister(op));
514 } else if (op->IsDoubleRegister()) {
515 Abort(kToOperandIsDoubleRegisterUnimplemented);
516 return Operand::Zero();
517 }
518 // Stack slots not implemented, use ToMemOperand instead.
519 UNREACHABLE();
520 return Operand::Zero();
521 }
522
523
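// Helper for the frameless case: incoming arguments have negative stack-slot
// indices, and index -1 maps to the slot closest to sp (offset 0).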
524 static int ArgumentsOffsetWithoutFrame(int index) {
525 DCHECK(index < 0);
526 return -(index + 1) * kPointerSize;
527 }
528
529
530 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
531 DCHECK(!op->IsRegister());
532 DCHECK(!op->IsDoubleRegister());
533 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
534 if (NeedsEagerFrame()) {
535 return MemOperand(fp, FrameSlotToFPOffset(op->index()));
536 } else {
537 // No eager frame has been built; retrieve the parameter relative to
538 // the stack pointer.
539 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
540 }
541 }
542
543
544 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
545 DCHECK(op->IsDoubleStackSlot());
546 if (NeedsEagerFrame()) {
547 return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
548 } else {
549 // No eager frame has been built; retrieve the parameter relative to
550 // the stack pointer.
551 return MemOperand(sp,
552 ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
553 }
554 }
555
556
557 void LCodeGen::WriteTranslation(LEnvironment* environment,
558 Translation* translation) {
559 if (environment == NULL) return;
560
561 // The translation includes one command per value in the environment.
562 int translation_size = environment->translation_size();
563
564 WriteTranslation(environment->outer(), translation);
565 WriteTranslationFrame(environment, translation);
566
567 int object_index = 0;
568 int dematerialized_index = 0;
569 for (int i = 0; i < translation_size; ++i) {
570 LOperand* value = environment->values()->at(i);
571 AddToTranslation(
572 environment, translation, value, environment->HasTaggedValueAt(i),
573 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
574 }
575 }
576
577
578 void LCodeGen::AddToTranslation(LEnvironment* environment,
579 Translation* translation, LOperand* op,
580 bool is_tagged, bool is_uint32,
581 int* object_index_pointer,
582 int* dematerialized_index_pointer) {
583 if (op == LEnvironment::materialization_marker()) {
584 int object_index = (*object_index_pointer)++;
585 if (environment->ObjectIsDuplicateAt(object_index)) {
586 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
587 translation->DuplicateObject(dupe_of);
588 return;
589 }
590 int object_length = environment->ObjectLengthAt(object_index);
591 if (environment->ObjectIsArgumentsAt(object_index)) {
592 translation->BeginArgumentsObject(object_length);
593 } else {
594 translation->BeginCapturedObject(object_length);
595 }
596 int dematerialized_index = *dematerialized_index_pointer;
597 int env_offset = environment->translation_size() + dematerialized_index;
598 *dematerialized_index_pointer += object_length;
599 for (int i = 0; i < object_length; ++i) {
600 LOperand* value = environment->values()->at(env_offset + i);
601 AddToTranslation(environment, translation, value,
602 environment->HasTaggedValueAt(env_offset + i),
603 environment->HasUint32ValueAt(env_offset + i),
604 object_index_pointer, dematerialized_index_pointer);
605 }
606 return;
607 }
608
609 if (op->IsStackSlot()) {
610 int index = op->index();
611 if (is_tagged) {
612 translation->StoreStackSlot(index);
613 } else if (is_uint32) {
614 translation->StoreUint32StackSlot(index);
615 } else {
616 translation->StoreInt32StackSlot(index);
617 }
618 } else if (op->IsDoubleStackSlot()) {
619 int index = op->index();
620 translation->StoreDoubleStackSlot(index);
621 } else if (op->IsRegister()) {
622 Register reg = ToRegister(op);
623 if (is_tagged) {
624 translation->StoreRegister(reg);
625 } else if (is_uint32) {
626 translation->StoreUint32Register(reg);
627 } else {
628 translation->StoreInt32Register(reg);
629 }
630 } else if (op->IsDoubleRegister()) {
631 DoubleRegister reg = ToDoubleRegister(op);
632 translation->StoreDoubleRegister(reg);
633 } else if (op->IsConstantOperand()) {
634 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
635 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
636 translation->StoreLiteral(src_index);
637 } else {
638 UNREACHABLE();
639 }
640 }
641
642
643 void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
644 LInstruction* instr) {
645 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
646 }
647
648
649 void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
650 LInstruction* instr,
651 SafepointMode safepoint_mode) {
652 DCHECK(instr != NULL);
653 __ Call(code, mode);
654 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
655
656 // Signal that we don't inline smi code before these stubs in the
657 // optimizing code generator.
658 if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
659 __ nop();
660 }
661 }
662
663
664 void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
665 LInstruction* instr, SaveFPRegsMode save_doubles) {
666 DCHECK(instr != NULL);
667
668 __ CallRuntime(function, num_arguments, save_doubles);
669
670 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
671 }
672
673
674 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
675 if (context->IsRegister()) {
676 __ Move(cp, ToRegister(context));
677 } else if (context->IsStackSlot()) {
678 __ LoadP(cp, ToMemOperand(context));
679 } else if (context->IsConstantOperand()) {
680 HConstant* constant =
681 chunk_->LookupConstant(LConstantOperand::cast(context));
682 __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
683 } else {
684 UNREACHABLE();
685 }
686 }
687
688
689 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
690 LInstruction* instr, LOperand* context) {
691 LoadContextFromDeferred(context);
692 __ CallRuntimeSaveDoubles(id);
693 RecordSafepointWithRegisters(instr->pointer_map(), argc,
694 Safepoint::kNoLazyDeopt);
695 }
696
697
698 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
699 Safepoint::DeoptMode mode) {
700 environment->set_has_been_used();
701 if (!environment->HasBeenRegistered()) {
702 // Physical stack frame layout:
703 // -x ............. -4 0 ..................................... y
704 // [incoming arguments] [spill slots] [pushed outgoing arguments]
705
706 // Layout of the environment:
707 // 0 ..................................................... size-1
708 // [parameters] [locals] [expression stack including arguments]
709
710 // Layout of the translation:
711 // 0 ........................................................ size - 1 + 4
712 // [expression stack including arguments] [locals] [4 words] [parameters]
713 // |>------------ translation_size ------------<|
714
715 int frame_count = 0;
716 int jsframe_count = 0;
717 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
718 ++frame_count;
719 if (e->frame_type() == JS_FUNCTION) {
720 ++jsframe_count;
721 }
722 }
723 Translation translation(&translations_, frame_count, jsframe_count, zone());
724 WriteTranslation(environment, &translation);
725 int deoptimization_index = deoptimizations_.length();
726 int pc_offset = masm()->pc_offset();
727 environment->Register(deoptimization_index, translation.index(),
728 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
729 deoptimizations_.Add(environment, zone());
730 }
731 }
732
733 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
734 DeoptimizeReason deopt_reason,
735 Deoptimizer::BailoutType bailout_type,
736 CRegister cr) {
737 LEnvironment* environment = instr->environment();
738 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
739 DCHECK(environment->HasBeenRegistered());
740 int id = environment->deoptimization_index();
741 Address entry =
742 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
743 if (entry == NULL) {
744 Abort(kBailoutWasNotPrepared);
745 return;
746 }
747
748 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
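// Stress-test deoptimization: decrement a global counter at every deopt
// site; when it reaches zero, reset it to FLAG_deopt_every_n_times and
// force a deoptimization regardless of the condition.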
749 CRegister alt_cr = cr6;
750 Register scratch = scratch0();
751 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
752 Label no_deopt;
753 DCHECK(!alt_cr.is(cr));
754 __ Push(r4, scratch);
755 __ mov(scratch, Operand(count));
756 __ lwz(r4, MemOperand(scratch));
757 __ subi(r4, r4, Operand(1));
758 __ cmpi(r4, Operand::Zero(), alt_cr);
759 __ bne(&no_deopt, alt_cr);
760 __ li(r4, Operand(FLAG_deopt_every_n_times));
761 __ stw(r4, MemOperand(scratch));
762 __ Pop(r4, scratch);
763
764 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
765 __ bind(&no_deopt);
766 __ stw(r4, MemOperand(scratch));
767 __ Pop(r4, scratch);
768 }
769
770 if (info()->ShouldTrapOnDeopt()) {
771 __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
772 }
773
774 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
775
776 DCHECK(info()->IsStub() || frame_is_built_);
777 // Go through jump table if we need to handle condition, build frame, or
778 // restore caller doubles.
779 if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
780 DeoptComment(deopt_info);
781 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
782 } else {
783 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
784 !frame_is_built_);
785 // We often have several deopts to the same entry; reuse the last
786 // jump entry if this is the case.
787 if (FLAG_trace_deopt || isolate()->is_profiling() ||
788 jump_table_.is_empty() ||
789 !table_entry.IsEquivalentTo(jump_table_.last())) {
790 jump_table_.Add(table_entry, zone());
791 }
792 __ b(cond, &jump_table_.last().label, cr);
793 }
794 }
795
796 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
797 DeoptimizeReason deopt_reason, CRegister cr) {
798 Deoptimizer::BailoutType bailout_type =
799 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
800 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
801 }
802
803
804 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
805 SafepointMode safepoint_mode) {
806 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
807 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
808 } else {
809 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
810 RecordSafepointWithRegisters(instr->pointer_map(), 0,
811 Safepoint::kLazyDeopt);
812 }
813 }
814
815
816 void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
817 int arguments, Safepoint::DeoptMode deopt_mode) {
818 DCHECK(expected_safepoint_kind_ == kind);
819
820 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
821 Safepoint safepoint =
822 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
823 for (int i = 0; i < operands->length(); i++) {
824 LOperand* pointer = operands->at(i);
825 if (pointer->IsStackSlot()) {
826 safepoint.DefinePointerSlot(pointer->index(), zone());
827 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
828 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
829 }
830 }
831 }
832
833
834 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
835 Safepoint::DeoptMode deopt_mode) {
836 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
837 }
838
839
840 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
841 LPointerMap empty_pointers(zone());
842 RecordSafepoint(&empty_pointers, deopt_mode);
843 }
844
845
846 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
847 int arguments,
848 Safepoint::DeoptMode deopt_mode) {
849 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
850 }
851
852
853 static const char* LabelType(LLabel* label) {
854 if (label->is_loop_header()) return " (loop header)";
855 if (label->is_osr_entry()) return " (OSR entry)";
856 return "";
857 }
858
859
860 void LCodeGen::DoLabel(LLabel* label) {
861 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
862 current_instruction_, label->hydrogen_value()->id(),
863 label->block_id(), LabelType(label));
864 __ bind(label->label());
865 current_block_ = label->block_id();
866 DoGap(label);
867 }
868
869
870 void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
871
872
873 void LCodeGen::DoGap(LGap* gap) {
874 for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
875 i++) {
876 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
877 LParallelMove* move = gap->GetParallelMove(inner_pos);
878 if (move != NULL) DoParallelMove(move);
879 }
880 }
881
882
883 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
884
885
886 void LCodeGen::DoParameter(LParameter* instr) {
887 // Nothing to do.
888 }
889
890
891 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
892 GenerateOsrPrologue();
893 }
894
895
896 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
897 Register dividend = ToRegister(instr->dividend());
898 int32_t divisor = instr->divisor();
899 DCHECK(dividend.is(ToRegister(instr->result())));
900
901 // Theoretically, a variation of the branch-free code for integer division by
902 // a power of 2 (calculating the remainder via an additional multiplication
903 // (which gets simplified to an 'and') and subtraction) should be faster, and
904 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
905 // indicate that positive dividends are heavily favored, so the branching
906 // version performs better.
907 HMod* hmod = instr->hydrogen();
908 int32_t shift = WhichPowerOf2Abs(divisor);
909 Label dividend_is_not_negative, done;
910 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
911 __ cmpwi(dividend, Operand::Zero());
912 __ bge(&dividend_is_not_negative);
913 if (shift) {
914 // Note that this is correct even for kMinInt operands.
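// For a negative dividend: dividend % 2^shift == -((-dividend) & (2^shift - 1)).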
915 __ neg(dividend, dividend);
916 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
917 __ neg(dividend, dividend, LeaveOE, SetRC);
918 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
919 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
920 }
921 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
922 __ li(dividend, Operand::Zero());
923 } else {
924 DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
925 }
926 __ b(&done);
927 }
928
929 __ bind(&dividend_is_not_negative);
930 if (shift) {
931 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
932 } else {
933 __ li(dividend, Operand::Zero());
934 }
935 __ bind(&done);
936 }
937
938
939 void LCodeGen::DoModByConstI(LModByConstI* instr) {
940 Register dividend = ToRegister(instr->dividend());
941 int32_t divisor = instr->divisor();
942 Register result = ToRegister(instr->result());
943 DCHECK(!dividend.is(result));
944
945 if (divisor == 0) {
946 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
947 return;
948 }
949
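// dividend % divisor == dividend - (dividend / divisor) * divisor.
// TruncatingDiv computes dividend / |divisor| (a multiply-based truncating
// division by a constant); using |divisor| is sufficient here because the
// remainder takes the sign of the dividend.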
950 __ TruncatingDiv(result, dividend, Abs(divisor));
951 __ mov(ip, Operand(Abs(divisor)));
952 __ mullw(result, result, ip);
953 __ sub(result, dividend, result, LeaveOE, SetRC);
954
955 // Check for negative zero.
956 HMod* hmod = instr->hydrogen();
957 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
958 Label remainder_not_zero;
959 __ bne(&remainder_not_zero, cr0);
960 __ cmpwi(dividend, Operand::Zero());
961 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
962 __ bind(&remainder_not_zero);
963 }
964 }
965
966
967 void LCodeGen::DoModI(LModI* instr) {
968 HMod* hmod = instr->hydrogen();
969 Register left_reg = ToRegister(instr->left());
970 Register right_reg = ToRegister(instr->right());
971 Register result_reg = ToRegister(instr->result());
972 Register scratch = scratch0();
973 bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
974 Label done;
975
976 if (can_overflow) {
977 __ li(r0, Operand::Zero()); // clear xer
978 __ mtxer(r0);
979 }
980
981 __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
982
983 // Check for x % 0.
984 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
985 __ cmpwi(right_reg, Operand::Zero());
986 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
987 }
988
989 // Check for kMinInt % -1, divw will return undefined, which is not what we
990 // want. We have to deopt if we care about -0, because we can't return that.
991 if (can_overflow) {
992 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
993 DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero, cr0);
994 } else {
995 if (CpuFeatures::IsSupported(ISELECT)) {
996 __ isel(overflow, result_reg, r0, result_reg, cr0);
997 __ boverflow(&done, cr0);
998 } else {
999 Label no_overflow_possible;
1000 __ bnooverflow(&no_overflow_possible, cr0);
1001 __ li(result_reg, Operand::Zero());
1002 __ b(&done);
1003 __ bind(&no_overflow_possible);
1004 }
1005 }
1006 }
1007
1008 __ mullw(scratch, right_reg, scratch);
1009 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
1010
1011 // If we care about -0, test if the dividend is <0 and the result is 0.
1012 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1013 __ bne(&done, cr0);
1014 __ cmpwi(left_reg, Operand::Zero());
1015 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1016 }
1017
1018 __ bind(&done);
1019 }
1020
1021
1022 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1023 Register dividend = ToRegister(instr->dividend());
1024 int32_t divisor = instr->divisor();
1025 Register result = ToRegister(instr->result());
1026 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1027 DCHECK(!result.is(dividend));
1028
1029 // Check for (0 / -x) that will produce negative zero.
1030 HDiv* hdiv = instr->hydrogen();
1031 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1032 __ cmpwi(dividend, Operand::Zero());
1033 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1034 }
1035 // Check for (kMinInt / -1).
1036 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1037 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1038 __ cmpw(dividend, r0);
1039 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
1040 }
1041
1042 int32_t shift = WhichPowerOf2Abs(divisor);
1043
1044 // Deoptimize if remainder will not be 0.
1045 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
1046 __ TestBitRange(dividend, shift - 1, 0, r0);
1047 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
1048 }
1049
1050 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1051 __ neg(result, dividend);
1052 return;
1053 }
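// Arithmetic right shifting rounds toward negative infinity; to get the
// required round-toward-zero behaviour, add 2^shift - 1 to negative
// dividends before shifting. The bias below is derived from the sign bit.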
1054 if (shift == 0) {
1055 __ mr(result, dividend);
1056 } else {
1057 if (shift == 1) {
1058 __ srwi(result, dividend, Operand(31));
1059 } else {
1060 __ srawi(result, dividend, 31);
1061 __ srwi(result, result, Operand(32 - shift));
1062 }
1063 __ add(result, dividend, result);
1064 __ srawi(result, result, shift);
1065 }
1066 if (divisor < 0) __ neg(result, result);
1067 }
1068
1069
1070 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1071 Register dividend = ToRegister(instr->dividend());
1072 int32_t divisor = instr->divisor();
1073 Register result = ToRegister(instr->result());
1074 DCHECK(!dividend.is(result));
1075
1076 if (divisor == 0) {
1077 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
1078 return;
1079 }
1080
1081 // Check for (0 / -x) that will produce negative zero.
1082 HDiv* hdiv = instr->hydrogen();
1083 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1084 __ cmpwi(dividend, Operand::Zero());
1085 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1086 }
1087
1088 __ TruncatingDiv(result, dividend, Abs(divisor));
1089 if (divisor < 0) __ neg(result, result);
1090
1091 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1092 Register scratch = scratch0();
1093 __ mov(ip, Operand(divisor));
1094 __ mullw(scratch, result, ip);
1095 __ cmpw(scratch, dividend);
1096 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
1097 }
1098 }
1099
1100
1101 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1102 void LCodeGen::DoDivI(LDivI* instr) {
1103 HBinaryOperation* hdiv = instr->hydrogen();
1104 const Register dividend = ToRegister(instr->dividend());
1105 const Register divisor = ToRegister(instr->divisor());
1106 Register result = ToRegister(instr->result());
1107 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1108
1109 DCHECK(!dividend.is(result));
1110 DCHECK(!divisor.is(result));
1111
1112 if (can_overflow) {
1113 __ li(r0, Operand::Zero()); // clear xer
1114 __ mtxer(r0);
1115 }
1116
1117 __ divw(result, dividend, divisor, SetOE, SetRC);
1118
1119 // Check for x / 0.
1120 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1121 __ cmpwi(divisor, Operand::Zero());
1122 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
1123 }
1124
1125 // Check for (0 / -x) that will produce negative zero.
1126 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1127 Label dividend_not_zero;
1128 __ cmpwi(dividend, Operand::Zero());
1129 __ bne(&dividend_not_zero);
1130 __ cmpwi(divisor, Operand::Zero());
1131 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1132 __ bind(&dividend_not_zero);
1133 }
1134
1135 // Check for (kMinInt / -1).
1136 if (can_overflow) {
1137 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1138 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
1139 } else {
1140 // When truncating, we want kMinInt / -1 = kMinInt.
1141 if (CpuFeatures::IsSupported(ISELECT)) {
1142 __ isel(overflow, result, dividend, result, cr0);
1143 } else {
1144 Label no_overflow_possible;
1145 __ bnooverflow(&no_overflow_possible, cr0);
1146 __ mr(result, dividend);
1147 __ bind(&no_overflow_possible);
1148 }
1149 }
1150 }
1151
1152 #if V8_TARGET_ARCH_PPC64
1153 __ extsw(result, result);
1154 #endif
1155
1156 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1157 // Deoptimize if remainder is not 0.
1158 Register scratch = scratch0();
1159 __ mullw(scratch, divisor, result);
1160 __ cmpw(dividend, scratch);
1161 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
1162 }
1163 }
1164
1165
1166 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1167 HBinaryOperation* hdiv = instr->hydrogen();
1168 Register dividend = ToRegister(instr->dividend());
1169 Register result = ToRegister(instr->result());
1170 int32_t divisor = instr->divisor();
1171 bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
1172
1173 // If the divisor is positive, things are easy: There can be no deopts and we
1174 // can simply do an arithmetic right shift.
1175 int32_t shift = WhichPowerOf2Abs(divisor);
1176 if (divisor > 0) {
1177 if (shift || !result.is(dividend)) {
1178 __ srawi(result, dividend, shift);
1179 }
1180 return;
1181 }
1182
1183 // If the divisor is negative, we have to negate and handle edge cases.
1184 OEBit oe = LeaveOE;
1185 #if V8_TARGET_ARCH_PPC64
1186 if (divisor == -1 && can_overflow) {
1187 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1188 __ cmpw(dividend, r0);
1189 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
1190 }
1191 #else
1192 if (can_overflow) {
1193 __ li(r0, Operand::Zero()); // clear xer
1194 __ mtxer(r0);
1195 oe = SetOE;
1196 }
1197 #endif
1198
1199 __ neg(result, dividend, oe, SetRC);
1200 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1201 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
1202 }
1203
1204 // If the negation could not overflow, simply shifting is OK.
1205 #if !V8_TARGET_ARCH_PPC64
1206 if (!can_overflow) {
1207 #endif
1208 if (shift) {
1209 __ ShiftRightArithImm(result, result, shift);
1210 }
1211 return;
1212 #if !V8_TARGET_ARCH_PPC64
1213 }
1214
1215 // Dividing by -1 is basically negation, unless we overflow.
1216 if (divisor == -1) {
1217 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
1218 return;
1219 }
1220
1221 Label overflow, done;
1222 __ boverflow(&overflow, cr0);
1223 __ srawi(result, result, shift);
1224 __ b(&done);
1225 __ bind(&overflow);
1226 __ mov(result, Operand(kMinInt / divisor));
1227 __ bind(&done);
1228 #endif
1229 }
1230
1231
1232 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1233 Register dividend = ToRegister(instr->dividend());
1234 int32_t divisor = instr->divisor();
1235 Register result = ToRegister(instr->result());
1236 DCHECK(!dividend.is(result));
1237
1238 if (divisor == 0) {
1239 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
1240 return;
1241 }
1242
1243 // Check for (0 / -x) that will produce negative zero.
1244 HMathFloorOfDiv* hdiv = instr->hydrogen();
1245 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1246 __ cmpwi(dividend, Operand::Zero());
1247 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1248 }
1249
1250 // Easy case: We need no dynamic check for the dividend and the flooring
1251 // division is the same as the truncating division.
1252 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1253 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1254 __ TruncatingDiv(result, dividend, Abs(divisor));
1255 if (divisor < 0) __ neg(result, result);
1256 return;
1257 }
1258
1259 // In the general case we may need to adjust before and after the truncating
1260 // division to get a flooring division.
1261 Register temp = ToRegister(instr->temp());
1262 DCHECK(!temp.is(dividend) && !temp.is(result));
1263 Label needs_adjustment, done;
1264 __ cmpwi(dividend, Operand::Zero());
1265 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1266 __ TruncatingDiv(result, dividend, Abs(divisor));
1267 if (divisor < 0) __ neg(result, result);
1268 __ b(&done);
1269 __ bind(&needs_adjustment);
1270 __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1271 __ TruncatingDiv(result, temp, Abs(divisor));
1272 if (divisor < 0) __ neg(result, result);
1273 __ subi(result, result, Operand(1));
1274 __ bind(&done);
1275 }
1276
1277
1278 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1279 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1280 HBinaryOperation* hdiv = instr->hydrogen();
1281 const Register dividend = ToRegister(instr->dividend());
1282 const Register divisor = ToRegister(instr->divisor());
1283 Register result = ToRegister(instr->result());
1284 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1285
1286 DCHECK(!dividend.is(result));
1287 DCHECK(!divisor.is(result));
1288
1289 if (can_overflow) {
1290 __ li(r0, Operand::Zero()); // clear xer
1291 __ mtxer(r0);
1292 }
1293
1294 __ divw(result, dividend, divisor, SetOE, SetRC);
1295
1296 // Check for x / 0.
1297 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1298 __ cmpwi(divisor, Operand::Zero());
1299 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
1300 }
1301
1302 // Check for (0 / -x) that will produce negative zero.
1303 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1304 Label dividend_not_zero;
1305 __ cmpwi(dividend, Operand::Zero());
1306 __ bne(&dividend_not_zero);
1307 __ cmpwi(divisor, Operand::Zero());
1308 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1309 __ bind(&dividend_not_zero);
1310 }
1311
1312 // Check for (kMinInt / -1).
1313 if (can_overflow) {
1314 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1315 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
1316 } else {
1317 // When truncating, we want kMinInt / -1 = kMinInt.
1318 if (CpuFeatures::IsSupported(ISELECT)) {
1319 __ isel(overflow, result, dividend, result, cr0);
1320 } else {
1321 Label no_overflow_possible;
1322 __ bnooverflow(&no_overflow_possible, cr0);
1323 __ mr(result, dividend);
1324 __ bind(&no_overflow_possible);
1325 }
1326 }
1327 }
1328
1329 Label done;
1330 Register scratch = scratch0();
1331 // If both operands have the same sign then we are done.
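// The sign bit of (dividend ^ divisor) is set exactly when the signs differ;
// flooring and truncating division only disagree for operands of opposite
// sign with a non-zero remainder.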
1332 #if V8_TARGET_ARCH_PPC64
1333 __ xor_(scratch, dividend, divisor);
1334 __ cmpwi(scratch, Operand::Zero());
1335 __ bge(&done);
1336 #else
1337 __ xor_(scratch, dividend, divisor, SetRC);
1338 __ bge(&done, cr0);
1339 #endif
1340
1341 // If there is no remainder then we are done.
1342 __ mullw(scratch, divisor, result);
1343 __ cmpw(dividend, scratch);
1344 __ beq(&done);
1345
1346 // We performed a truncating division. Correct the result.
1347 __ subi(result, result, Operand(1));
1348 __ bind(&done);
1349 #if V8_TARGET_ARCH_PPC64
1350 __ extsw(result, result);
1351 #endif
1352 }
1353
1354
1355 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1356 DoubleRegister addend = ToDoubleRegister(instr->addend());
1357 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1358 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1359 DoubleRegister result = ToDoubleRegister(instr->result());
1360
1361 __ fmadd(result, multiplier, multiplicand, addend);
1362 }
1363
1364
1365 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1366 DoubleRegister minuend = ToDoubleRegister(instr->minuend());
1367 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1368 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1369 DoubleRegister result = ToDoubleRegister(instr->result());
1370
1371 __ fmsub(result, multiplier, multiplicand, minuend);
1372 }
1373
1374
1375 void LCodeGen::DoMulI(LMulI* instr) {
1376 Register scratch = scratch0();
1377 Register result = ToRegister(instr->result());
1378 // Note that result may alias left.
1379 Register left = ToRegister(instr->left());
1380 LOperand* right_op = instr->right();
1381
1382 bool bailout_on_minus_zero =
1383 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1384 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1385
1386 if (right_op->IsConstantOperand()) {
1387 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1388
1389 if (bailout_on_minus_zero && (constant < 0)) {
1390 // The case of a zero constant is handled separately below.
1391 // If the constant is negative and left is zero, the result should be -0.
1392 __ cmpi(left, Operand::Zero());
1393 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1394 }
1395
1396 switch (constant) {
1397 case -1:
1398 if (can_overflow) {
1399 #if V8_TARGET_ARCH_PPC64
1400 if (instr->hydrogen()->representation().IsSmi()) {
1401 #endif
1402 __ li(r0, Operand::Zero()); // clear xer
1403 __ mtxer(r0);
1404 __ neg(result, left, SetOE, SetRC);
1405 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
1406 #if V8_TARGET_ARCH_PPC64
1407 } else {
1408 __ neg(result, left);
1409 __ TestIfInt32(result, r0);
1410 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1411 }
1412 #endif
1413 } else {
1414 __ neg(result, left);
1415 }
1416 break;
1417 case 0:
1418 if (bailout_on_minus_zero) {
1419 // If left is strictly negative and the constant is zero, the
1420 // result is -0. Deoptimize if required; otherwise return 0.
1421 #if V8_TARGET_ARCH_PPC64
1422 if (instr->hydrogen()->representation().IsSmi()) {
1423 #endif
1424 __ cmpi(left, Operand::Zero());
1425 #if V8_TARGET_ARCH_PPC64
1426 } else {
1427 __ cmpwi(left, Operand::Zero());
1428 }
1429 #endif
1430 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1431 }
1432 __ li(result, Operand::Zero());
1433 break;
1434 case 1:
1435 __ Move(result, left);
1436 break;
1437 default:
1438 // Multiplying by powers of two and powers of two plus or minus
1439 // one can be done faster with shifted operands.
1440 // For other constants we emit standard code.
1441 int32_t mask = constant >> 31;
1442 uint32_t constant_abs = (constant + mask) ^ mask;
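// The two lines above compute |constant| without a branch: mask is 0 for
// non-negative constants and -1 (all ones) for negative ones.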
1443
1444 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1445 int32_t shift = WhichPowerOf2(constant_abs);
1446 __ ShiftLeftImm(result, left, Operand(shift));
1447 // Correct the sign of the result if the constant is negative.
1448 if (constant < 0) __ neg(result, result);
1449 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1450 int32_t shift = WhichPowerOf2(constant_abs - 1);
1451 __ ShiftLeftImm(scratch, left, Operand(shift));
1452 __ add(result, scratch, left);
1453 // Correct the sign of the result if the constant is negative.
1454 if (constant < 0) __ neg(result, result);
1455 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1456 int32_t shift = WhichPowerOf2(constant_abs + 1);
1457 __ ShiftLeftImm(scratch, left, Operand(shift));
1458 __ sub(result, scratch, left);
1459 // Correct the sign of the result if the constant is negative.
1460 if (constant < 0) __ neg(result, result);
1461 } else {
1462 // Generate standard code.
1463 __ mov(ip, Operand(constant));
1464 __ Mul(result, left, ip);
1465 }
1466 }
1467
1468 } else {
1469 DCHECK(right_op->IsRegister());
1470 Register right = ToRegister(right_op);
1471
1472 if (can_overflow) {
1473 #if V8_TARGET_ARCH_PPC64
1474 // result = left * right.
1475 if (instr->hydrogen()->representation().IsSmi()) {
1476 __ SmiUntag(result, left);
1477 __ SmiUntag(scratch, right);
1478 __ Mul(result, result, scratch);
1479 } else {
1480 __ Mul(result, left, right);
1481 }
1482 __ TestIfInt32(result, r0);
1483 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1484 if (instr->hydrogen()->representation().IsSmi()) {
1485 __ SmiTag(result);
1486 }
1487 #else
1488 // scratch:result = left * right.
1489 if (instr->hydrogen()->representation().IsSmi()) {
1490 __ SmiUntag(result, left);
1491 __ mulhw(scratch, result, right);
1492 __ mullw(result, result, right);
1493 } else {
1494 __ mulhw(scratch, left, right);
1495 __ mullw(result, left, right);
1496 }
1497 __ TestIfInt32(scratch, result, r0);
1498 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1499 #endif
1500 } else {
1501 if (instr->hydrogen()->representation().IsSmi()) {
1502 __ SmiUntag(result, left);
1503 __ Mul(result, result, right);
1504 } else {
1505 __ Mul(result, left, right);
1506 }
1507 }
1508
1509 if (bailout_on_minus_zero) {
1510 Label done;
1511 #if V8_TARGET_ARCH_PPC64
1512 if (instr->hydrogen()->representation().IsSmi()) {
1513 #endif
1514 __ xor_(r0, left, right, SetRC);
1515 __ bge(&done, cr0);
1516 #if V8_TARGET_ARCH_PPC64
1517 } else {
1518 __ xor_(r0, left, right);
1519 __ cmpwi(r0, Operand::Zero());
1520 __ bge(&done);
1521 }
1522 #endif
1523 // Bail out if the result is minus zero.
1524 __ cmpi(result, Operand::Zero());
1525 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1526 __ bind(&done);
1527 }
1528 }
1529 }
1530
1531
1532 void LCodeGen::DoBitI(LBitI* instr) {
1533 LOperand* left_op = instr->left();
1534 LOperand* right_op = instr->right();
1535 DCHECK(left_op->IsRegister());
1536 Register left = ToRegister(left_op);
1537 Register result = ToRegister(instr->result());
1538 Operand right(no_reg);
1539
1540 if (right_op->IsStackSlot()) {
1541 right = Operand(EmitLoadRegister(right_op, ip));
1542 } else {
1543 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1544 right = ToOperand(right_op);
1545
1546 if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
1547 switch (instr->op()) {
1548 case Token::BIT_AND:
1549 __ andi(result, left, right);
1550 break;
1551 case Token::BIT_OR:
1552 __ ori(result, left, right);
1553 break;
1554 case Token::BIT_XOR:
1555 __ xori(result, left, right);
1556 break;
1557 default:
1558 UNREACHABLE();
1559 break;
1560 }
1561 return;
1562 }
1563 }
1564
1565 switch (instr->op()) {
1566 case Token::BIT_AND:
1567 __ And(result, left, right);
1568 break;
1569 case Token::BIT_OR:
1570 __ Or(result, left, right);
1571 break;
1572 case Token::BIT_XOR:
1573 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1574 __ notx(result, left);
1575 } else {
1576 __ Xor(result, left, right);
1577 }
1578 break;
1579 default:
1580 UNREACHABLE();
1581 break;
1582 }
1583 }
1584
1585
1586 void LCodeGen::DoShiftI(LShiftI* instr) {
1587 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1588 // result may alias either of them.
1589 LOperand* right_op = instr->right();
1590 Register left = ToRegister(instr->left());
1591 Register result = ToRegister(instr->result());
1592 Register scratch = scratch0();
1593 if (right_op->IsRegister()) {
1594 // Mask the right_op operand.
1595 __ andi(scratch, ToRegister(right_op), Operand(0x1F));
1596 switch (instr->op()) {
1597 case Token::ROR:
1598 // rotate_right(a, b) == rotate_left(a, 32 - b)
1599 __ subfic(scratch, scratch, Operand(32));
1600 __ rotlw(result, left, scratch);
1601 break;
1602 case Token::SAR:
1603 __ sraw(result, left, scratch);
1604 break;
1605 case Token::SHR:
1606 if (instr->can_deopt()) {
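// With the shift amount masked to 0..31, an unsigned shift can only leave
// the sign bit set when the amount is zero; such a value does not fit in a
// signed int32 result, so deoptimize if the result is negative.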
1607 __ srw(result, left, scratch, SetRC);
1608 #if V8_TARGET_ARCH_PPC64
1609 __ extsw(result, result, SetRC);
1610 #endif
1611 DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
1612 } else {
1613 __ srw(result, left, scratch);
1614 }
1615 break;
1616 case Token::SHL:
1617 __ slw(result, left, scratch);
1618 #if V8_TARGET_ARCH_PPC64
1619 __ extsw(result, result);
1620 #endif
1621 break;
1622 default:
1623 UNREACHABLE();
1624 break;
1625 }
1626 } else {
1627 // Mask the right_op operand.
1628 int value = ToInteger32(LConstantOperand::cast(right_op));
1629 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1630 switch (instr->op()) {
1631 case Token::ROR:
1632 if (shift_count != 0) {
1633 __ rotrwi(result, left, shift_count);
1634 } else {
1635 __ Move(result, left);
1636 }
1637 break;
1638 case Token::SAR:
1639 if (shift_count != 0) {
1640 __ srawi(result, left, shift_count);
1641 } else {
1642 __ Move(result, left);
1643 }
1644 break;
1645 case Token::SHR:
1646 if (shift_count != 0) {
1647 __ srwi(result, left, Operand(shift_count));
1648 } else {
1649 if (instr->can_deopt()) {
1650 __ cmpwi(left, Operand::Zero());
1651 DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
1652 }
1653 __ Move(result, left);
1654 }
1655 break;
1656 case Token::SHL:
1657 if (shift_count != 0) {
1658 #if V8_TARGET_ARCH_PPC64
1659 if (instr->hydrogen_value()->representation().IsSmi()) {
1660 __ sldi(result, left, Operand(shift_count));
1661 #else
1662 if (instr->hydrogen_value()->representation().IsSmi() &&
1663 instr->can_deopt()) {
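          // Without 64-bit registers, a Smi shift left by N is done as an
          // integer shift by N - 1 followed by Smi tagging, which supplies the
          // final shift together with the overflow check.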
1664 if (shift_count != 1) {
1665 __ slwi(result, left, Operand(shift_count - 1));
1666 __ SmiTagCheckOverflow(result, result, scratch);
1667 } else {
1668 __ SmiTagCheckOverflow(result, left, scratch);
1669 }
1670 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
1671 #endif
1672 } else {
1673 __ slwi(result, left, Operand(shift_count));
1674 #if V8_TARGET_ARCH_PPC64
1675 __ extsw(result, result);
1676 #endif
1677 }
1678 } else {
1679 __ Move(result, left);
1680 }
1681 break;
1682 default:
1683 UNREACHABLE();
1684 break;
1685 }
1686 }
1687 }
1688
1689
1690 void LCodeGen::DoSubI(LSubI* instr) {
1691 LOperand* right = instr->right();
1692 Register left = ToRegister(instr->left());
1693 Register result = ToRegister(instr->result());
1694 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1695 #if V8_TARGET_ARCH_PPC64
1696 const bool isInteger = !instr->hydrogen()->representation().IsSmi();
1697 #else
1698 const bool isInteger = false;
1699 #endif
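  // On PPC64 an int32 operation cannot overflow the 64-bit register, so the
  // result is computed first and overflow is detected afterwards by checking
  // that it still fits in an int32.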
1700 if (!can_overflow || isInteger) {
1701 if (right->IsConstantOperand()) {
1702 __ Add(result, left, -(ToOperand(right).immediate()), r0);
1703 } else {
1704 __ sub(result, left, EmitLoadRegister(right, ip));
1705 }
1706 #if V8_TARGET_ARCH_PPC64
1707 if (can_overflow) {
1708 __ TestIfInt32(result, r0);
1709 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1710 }
1711 #endif
1712 } else {
1713 if (right->IsConstantOperand()) {
1714 __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
1715 scratch0(), r0);
1716 } else {
1717 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1718 scratch0(), r0);
1719 }
1720 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
1721 }
1722 }
1723
1724
1725 void LCodeGen::DoRSubI(LRSubI* instr) {
1726 LOperand* left = instr->left();
1727 LOperand* right = instr->right();
1728 LOperand* result = instr->result();
1729
1730 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1731 right->IsConstantOperand());
1732
1733 Operand right_operand = ToOperand(right);
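  // subfic only encodes a signed 16-bit immediate; a larger constant is
  // materialized in a register first.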
1734 if (is_int16(right_operand.immediate())) {
1735 __ subfic(ToRegister(result), ToRegister(left), right_operand);
1736 } else {
1737 __ mov(r0, right_operand);
1738 __ sub(ToRegister(result), r0, ToRegister(left));
1739 }
1740 }
1741
1742
1743 void LCodeGen::DoConstantI(LConstantI* instr) {
1744 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1745 }
1746
1747
1748 void LCodeGen::DoConstantS(LConstantS* instr) {
1749 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
1750 }
1751
1752
1753 void LCodeGen::DoConstantD(LConstantD* instr) {
1754 DCHECK(instr->result()->IsDoubleRegister());
1755 DoubleRegister result = ToDoubleRegister(instr->result());
1756 #if V8_HOST_ARCH_IA32
1757 // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
1758 // builds.
1759 uint64_t bits = instr->bits();
1760 if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
1761 V8_UINT64_C(0x7FF0000000000000)) {
1762 uint32_t lo = static_cast<uint32_t>(bits);
1763 uint32_t hi = static_cast<uint32_t>(bits >> 32);
1764 __ mov(ip, Operand(lo));
1765 __ mov(scratch0(), Operand(hi));
1766 __ MovInt64ToDouble(result, scratch0(), ip);
1767 return;
1768 }
1769 #endif
1770 double v = instr->value();
1771 __ LoadDoubleLiteral(result, v, scratch0());
1772 }
1773
1774
1775 void LCodeGen::DoConstantE(LConstantE* instr) {
1776 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1777 }
1778
1779
1780 void LCodeGen::DoConstantT(LConstantT* instr) {
1781 Handle<Object> object = instr->value(isolate());
1782 AllowDeferredHandleDereference smi_check;
1783 __ Move(ToRegister(instr->result()), object);
1784 }
1785
1786
1787 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
1788 String::Encoding encoding) {
1789 if (index->IsConstantOperand()) {
1790 int offset = ToInteger32(LConstantOperand::cast(index));
1791 if (encoding == String::TWO_BYTE_ENCODING) {
1792 offset *= kUC16Size;
1793 }
1794 STATIC_ASSERT(kCharSize == 1);
1795 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1796 }
1797 Register scratch = scratch0();
1798 DCHECK(!scratch.is(string));
1799 DCHECK(!scratch.is(ToRegister(index)));
1800 if (encoding == String::ONE_BYTE_ENCODING) {
1801 __ add(scratch, string, ToRegister(index));
1802 } else {
1803 STATIC_ASSERT(kUC16Size == 2);
1804 __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
1805 __ add(scratch, string, scratch);
1806 }
1807 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1808 }
1809
1810
1811 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1812 String::Encoding encoding = instr->hydrogen()->encoding();
1813 Register string = ToRegister(instr->string());
1814 Register result = ToRegister(instr->result());
1815
1816 if (FLAG_debug_code) {
1817 Register scratch = scratch0();
1818 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1819 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1820
1821 __ andi(scratch, scratch,
1822 Operand(kStringRepresentationMask | kStringEncodingMask));
1823 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1824 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1825 __ cmpi(scratch,
1826 Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
1827 : two_byte_seq_type));
1828 __ Check(eq, kUnexpectedStringType);
1829 }
1830
1831 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1832 if (encoding == String::ONE_BYTE_ENCODING) {
1833 __ lbz(result, operand);
1834 } else {
1835 __ lhz(result, operand);
1836 }
1837 }
1838
1839
1840 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1841 String::Encoding encoding = instr->hydrogen()->encoding();
1842 Register string = ToRegister(instr->string());
1843 Register value = ToRegister(instr->value());
1844
1845 if (FLAG_debug_code) {
1846 Register index = ToRegister(instr->index());
1847 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1848 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1849 int encoding_mask =
1850 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1851 ? one_byte_seq_type
1852 : two_byte_seq_type;
1853 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1854 }
1855
1856 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1857 if (encoding == String::ONE_BYTE_ENCODING) {
1858 __ stb(value, operand);
1859 } else {
1860 __ sth(value, operand);
1861 }
1862 }
1863
1864
1865 void LCodeGen::DoAddI(LAddI* instr) {
1866 LOperand* right = instr->right();
1867 Register left = ToRegister(instr->left());
1868 Register result = ToRegister(instr->result());
1869 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1870 #if V8_TARGET_ARCH_PPC64
1871 const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
1872 instr->hydrogen()->representation().IsExternal());
1873 #else
1874 const bool isInteger = false;
1875 #endif
1876
1877 if (!can_overflow || isInteger) {
1878 if (right->IsConstantOperand()) {
1879 __ Add(result, left, ToOperand(right).immediate(), r0);
1880 } else {
1881 __ add(result, left, EmitLoadRegister(right, ip));
1882 }
1883 #if V8_TARGET_ARCH_PPC64
1884 if (can_overflow) {
1885 __ TestIfInt32(result, r0);
1886 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1887 }
1888 #endif
1889 } else {
1890 if (right->IsConstantOperand()) {
1891 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
1892 scratch0(), r0);
1893 } else {
1894 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1895 scratch0(), r0);
1896 }
1897 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
1898 }
1899 }
1900
1901
1902 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1903 LOperand* left = instr->left();
1904 LOperand* right = instr->right();
1905 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1906 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
1907 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1908 Register left_reg = ToRegister(left);
1909 Register right_reg = EmitLoadRegister(right, ip);
1910 Register result_reg = ToRegister(instr->result());
1911 Label return_left, done;
1912 #if V8_TARGET_ARCH_PPC64
1913 if (instr->hydrogen_value()->representation().IsSmi()) {
1914 #endif
1915 __ cmp(left_reg, right_reg);
1916 #if V8_TARGET_ARCH_PPC64
1917 } else {
1918 __ cmpw(left_reg, right_reg);
1919 }
1920 #endif
1921 if (CpuFeatures::IsSupported(ISELECT)) {
1922 __ isel(cond, result_reg, left_reg, right_reg);
1923 } else {
1924 __ b(cond, &return_left);
1925 __ Move(result_reg, right_reg);
1926 __ b(&done);
1927 __ bind(&return_left);
1928 __ Move(result_reg, left_reg);
1929 __ bind(&done);
1930 }
1931 } else {
1932 DCHECK(instr->hydrogen()->representation().IsDouble());
1933 DoubleRegister left_reg = ToDoubleRegister(left);
1934 DoubleRegister right_reg = ToDoubleRegister(right);
1935 DoubleRegister result_reg = ToDoubleRegister(instr->result());
1936 Label check_nan_left, check_zero, return_left, return_right, done;
1937 __ fcmpu(left_reg, right_reg);
1938 __ bunordered(&check_nan_left);
1939 __ beq(&check_zero);
1940 __ b(cond, &return_left);
1941 __ b(&return_right);
1942
1943 __ bind(&check_zero);
1944 __ fcmpu(left_reg, kDoubleRegZero);
1945 __ bne(&return_left); // left == right != 0.
1946
1947 // At this point, both left and right are either 0 or -0.
1948 if (operation == HMathMinMax::kMathMin) {
1949 // Min: The algorithm is: -((-L) + (-R)), which in case of L and R being
1950 // different registers is most efficiently expressed as -((-L) - R).
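      // e.g. L = -0, R = +0: -((+0) - (+0)) = -(+0) = -0, the correct minimum.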
1951 __ fneg(left_reg, left_reg);
1952 if (left_reg.is(right_reg)) {
1953 __ fadd(result_reg, left_reg, right_reg);
1954 } else {
1955 __ fsub(result_reg, left_reg, right_reg);
1956 }
1957 __ fneg(result_reg, result_reg);
1958 } else {
1959 // Max: The following works because +0 + -0 == +0
1960 __ fadd(result_reg, left_reg, right_reg);
1961 }
1962 __ b(&done);
1963
1964 __ bind(&check_nan_left);
1965 __ fcmpu(left_reg, left_reg);
1966 __ bunordered(&return_left); // left == NaN.
1967
1968 __ bind(&return_right);
1969 if (!right_reg.is(result_reg)) {
1970 __ fmr(result_reg, right_reg);
1971 }
1972 __ b(&done);
1973
1974 __ bind(&return_left);
1975 if (!left_reg.is(result_reg)) {
1976 __ fmr(result_reg, left_reg);
1977 }
1978 __ bind(&done);
1979 }
1980 }
1981
1982
1983 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1984 DoubleRegister left = ToDoubleRegister(instr->left());
1985 DoubleRegister right = ToDoubleRegister(instr->right());
1986 DoubleRegister result = ToDoubleRegister(instr->result());
1987 switch (instr->op()) {
1988 case Token::ADD:
1989 __ fadd(result, left, right);
1990 break;
1991 case Token::SUB:
1992 __ fsub(result, left, right);
1993 break;
1994 case Token::MUL:
1995 __ fmul(result, left, right);
1996 break;
1997 case Token::DIV:
1998 __ fdiv(result, left, right);
1999 break;
2000 case Token::MOD: {
2001 __ PrepareCallCFunction(0, 2, scratch0());
2002 __ MovToFloatParameters(left, right);
2003 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
2004 0, 2);
2005       // Move the result into the double result register.
2006 __ MovFromFloatResult(result);
2007 break;
2008 }
2009 default:
2010 UNREACHABLE();
2011 break;
2012 }
2013 }
2014
2015
2016 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2017 DCHECK(ToRegister(instr->context()).is(cp));
2018 DCHECK(ToRegister(instr->left()).is(r4));
2019 DCHECK(ToRegister(instr->right()).is(r3));
2020 DCHECK(ToRegister(instr->result()).is(r3));
2021
2022 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2023 CallCode(code, RelocInfo::CODE_TARGET, instr);
2024 }
2025
2026
2027 template <class InstrType>
2028 void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
2029 int left_block = instr->TrueDestination(chunk_);
2030 int right_block = instr->FalseDestination(chunk_);
2031
2032 int next_block = GetNextEmittedBlock();
2033
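  // Emit as few branches as possible by falling through whenever one of the
  // destinations is the next emitted block.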
2034 if (right_block == left_block || cond == al) {
2035 EmitGoto(left_block);
2036 } else if (left_block == next_block) {
2037 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
2038 } else if (right_block == next_block) {
2039 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2040 } else {
2041 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2042 __ b(chunk_->GetAssemblyLabel(right_block));
2043 }
2044 }
2045
2046
2047 template <class InstrType>
2048 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
2049 int true_block = instr->TrueDestination(chunk_);
2050 __ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
2051 }
2052
2053
2054 template <class InstrType>
2055 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
2056 int false_block = instr->FalseDestination(chunk_);
2057 __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
2058 }
2059
2060
2061 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
2062
2063
2064 void LCodeGen::DoBranch(LBranch* instr) {
2065 Representation r = instr->hydrogen()->value()->representation();
2066 DoubleRegister dbl_scratch = double_scratch0();
2067 const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
2068 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
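  // CR7's EQ bit is set when the value compares equal to zero and its FU bit
  // when the comparison is unordered (NaN); either one means a false value.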
2069
2070 if (r.IsInteger32()) {
2071 DCHECK(!info()->IsStub());
2072 Register reg = ToRegister(instr->value());
2073 __ cmpwi(reg, Operand::Zero());
2074 EmitBranch(instr, ne);
2075 } else if (r.IsSmi()) {
2076 DCHECK(!info()->IsStub());
2077 Register reg = ToRegister(instr->value());
2078 __ cmpi(reg, Operand::Zero());
2079 EmitBranch(instr, ne);
2080 } else if (r.IsDouble()) {
2081 DCHECK(!info()->IsStub());
2082 DoubleRegister reg = ToDoubleRegister(instr->value());
2083 // Test the double value. Zero and NaN are false.
2084 __ fcmpu(reg, kDoubleRegZero, cr7);
2085 __ mfcr(r0);
2086 __ andi(r0, r0, Operand(crZOrNaNBits));
2087 EmitBranch(instr, eq, cr0);
2088 } else {
2089 DCHECK(r.IsTagged());
2090 Register reg = ToRegister(instr->value());
2091 HType type = instr->hydrogen()->value()->type();
2092 if (type.IsBoolean()) {
2093 DCHECK(!info()->IsStub());
2094 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2095 EmitBranch(instr, eq);
2096 } else if (type.IsSmi()) {
2097 DCHECK(!info()->IsStub());
2098 __ cmpi(reg, Operand::Zero());
2099 EmitBranch(instr, ne);
2100 } else if (type.IsJSArray()) {
2101 DCHECK(!info()->IsStub());
2102 EmitBranch(instr, al);
2103 } else if (type.IsHeapNumber()) {
2104 DCHECK(!info()->IsStub());
2105 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2106 // Test the double value. Zero and NaN are false.
2107 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2108 __ mfcr(r0);
2109 __ andi(r0, r0, Operand(crZOrNaNBits));
2110 EmitBranch(instr, eq, cr0);
2111 } else if (type.IsString()) {
2112 DCHECK(!info()->IsStub());
2113 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2114 __ cmpi(ip, Operand::Zero());
2115 EmitBranch(instr, ne);
2116 } else {
2117 ToBooleanHints expected = instr->hydrogen()->expected_input_types();
2118 // Avoid deopts in the case where we've never executed this path before.
2119 if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
2120
2121 if (expected & ToBooleanHint::kUndefined) {
2122 // undefined -> false.
2123 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2124 __ beq(instr->FalseLabel(chunk_));
2125 }
2126 if (expected & ToBooleanHint::kBoolean) {
2127 // Boolean -> its value.
2128 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2129 __ beq(instr->TrueLabel(chunk_));
2130 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2131 __ beq(instr->FalseLabel(chunk_));
2132 }
2133 if (expected & ToBooleanHint::kNull) {
2134 // 'null' -> false.
2135 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2136 __ beq(instr->FalseLabel(chunk_));
2137 }
2138
2139 if (expected & ToBooleanHint::kSmallInteger) {
2140 // Smis: 0 -> false, all other -> true.
2141 __ cmpi(reg, Operand::Zero());
2142 __ beq(instr->FalseLabel(chunk_));
2143 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2144 } else if (expected & ToBooleanHint::kNeedsMap) {
2145 // If we need a map later and have a Smi -> deopt.
2146 __ TestIfSmi(reg, r0);
2147 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
2148 }
2149
2150 const Register map = scratch0();
2151 if (expected & ToBooleanHint::kNeedsMap) {
2152 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2153
2154 if (expected & ToBooleanHint::kCanBeUndetectable) {
2155 // Undetectable -> false.
2156 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2157 __ TestBit(ip, Map::kIsUndetectable, r0);
2158 __ bne(instr->FalseLabel(chunk_), cr0);
2159 }
2160 }
2161
2162 if (expected & ToBooleanHint::kReceiver) {
2163 // spec object -> true.
2164 __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
2165 __ bge(instr->TrueLabel(chunk_));
2166 }
2167
2168 if (expected & ToBooleanHint::kString) {
2169 // String value -> false iff empty.
2170 Label not_string;
2171 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2172         __ bge(&not_string);
2173 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2174 __ cmpi(ip, Operand::Zero());
2175 __ bne(instr->TrueLabel(chunk_));
2176 __ b(instr->FalseLabel(chunk_));
2177         __ bind(&not_string);
2178 }
2179
2180 if (expected & ToBooleanHint::kSymbol) {
2181 // Symbol value -> true.
2182 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2183 __ beq(instr->TrueLabel(chunk_));
2184 }
2185
2186 if (expected & ToBooleanHint::kSimdValue) {
2187 // SIMD value -> true.
2188 Label not_simd;
2189 __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
2190 __ beq(instr->TrueLabel(chunk_));
2191 }
2192
2193 if (expected & ToBooleanHint::kHeapNumber) {
2194 // heap number -> false iff +0, -0, or NaN.
2195 Label not_heap_number;
2196 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2197         __ bne(&not_heap_number);
2198 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2199 // Test the double value. Zero and NaN are false.
2200 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2201 __ mfcr(r0);
2202 __ andi(r0, r0, Operand(crZOrNaNBits));
2203 __ bne(instr->FalseLabel(chunk_), cr0);
2204 __ b(instr->TrueLabel(chunk_));
2205         __ bind(&not_heap_number);
2206 }
2207
2208 if (expected != ToBooleanHint::kAny) {
2209 // We've seen something for the first time -> deopt.
2210 // This can only happen if we are not generic already.
2211 DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
2212 }
2213 }
2214 }
2215 }
2216
2217
2218 void LCodeGen::EmitGoto(int block) {
2219 if (!IsNextEmittedBlock(block)) {
2220 __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2221 }
2222 }
2223
2224
2225 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2226
2227
2228 Condition LCodeGen::TokenToCondition(Token::Value op) {
2229 Condition cond = kNoCondition;
2230 switch (op) {
2231 case Token::EQ:
2232 case Token::EQ_STRICT:
2233 cond = eq;
2234 break;
2235 case Token::NE:
2236 case Token::NE_STRICT:
2237 cond = ne;
2238 break;
2239 case Token::LT:
2240 cond = lt;
2241 break;
2242 case Token::GT:
2243 cond = gt;
2244 break;
2245 case Token::LTE:
2246 cond = le;
2247 break;
2248 case Token::GTE:
2249 cond = ge;
2250 break;
2251 case Token::IN:
2252 case Token::INSTANCEOF:
2253 default:
2254 UNREACHABLE();
2255 }
2256 return cond;
2257 }
2258
2259
2260 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2261 LOperand* left = instr->left();
2262 LOperand* right = instr->right();
2263 bool is_unsigned =
2264 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2265 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2266 Condition cond = TokenToCondition(instr->op());
2267
2268 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2269 // We can statically evaluate the comparison.
2270 double left_val = ToDouble(LConstantOperand::cast(left));
2271 double right_val = ToDouble(LConstantOperand::cast(right));
2272 int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2273 ? instr->TrueDestination(chunk_)
2274 : instr->FalseDestination(chunk_);
2275 EmitGoto(next_block);
2276 } else {
2277 if (instr->is_double()) {
2278 // Compare left and right operands as doubles and load the
2279 // resulting flags into the normal status register.
2280 __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
2281 // If a NaN is involved, i.e. the result is unordered,
2282       // jump to the false block label.
2283 __ bunordered(instr->FalseLabel(chunk_));
2284 } else {
2285 if (right->IsConstantOperand()) {
2286 int32_t value = ToInteger32(LConstantOperand::cast(right));
2287 if (instr->hydrogen_value()->representation().IsSmi()) {
2288 if (is_unsigned) {
2289 __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2290 } else {
2291 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2292 }
2293 } else {
2294 if (is_unsigned) {
2295 __ Cmplwi(ToRegister(left), Operand(value), r0);
2296 } else {
2297 __ Cmpwi(ToRegister(left), Operand(value), r0);
2298 }
2299 }
2300 } else if (left->IsConstantOperand()) {
2301 int32_t value = ToInteger32(LConstantOperand::cast(left));
2302 if (instr->hydrogen_value()->representation().IsSmi()) {
2303 if (is_unsigned) {
2304 __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2305 } else {
2306 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2307 }
2308 } else {
2309 if (is_unsigned) {
2310 __ Cmplwi(ToRegister(right), Operand(value), r0);
2311 } else {
2312 __ Cmpwi(ToRegister(right), Operand(value), r0);
2313 }
2314 }
2315 // We commuted the operands, so commute the condition.
2316 cond = CommuteCondition(cond);
2317 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2318 if (is_unsigned) {
2319 __ cmpl(ToRegister(left), ToRegister(right));
2320 } else {
2321 __ cmp(ToRegister(left), ToRegister(right));
2322 }
2323 } else {
2324 if (is_unsigned) {
2325 __ cmplw(ToRegister(left), ToRegister(right));
2326 } else {
2327 __ cmpw(ToRegister(left), ToRegister(right));
2328 }
2329 }
2330 }
2331 EmitBranch(instr, cond);
2332 }
2333 }
2334
2335
2336 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2337 Register left = ToRegister(instr->left());
2338 Register right = ToRegister(instr->right());
2339
2340 __ cmp(left, right);
2341 EmitBranch(instr, eq);
2342 }
2343
2344
2345 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2346 if (instr->hydrogen()->representation().IsTagged()) {
2347 Register input_reg = ToRegister(instr->object());
2348 __ mov(ip, Operand(factory()->the_hole_value()));
2349 __ cmp(input_reg, ip);
2350 EmitBranch(instr, eq);
2351 return;
2352 }
2353
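  // The hole is a NaN with a distinctive bit pattern: anything that compares
  // ordered with itself cannot be the hole; otherwise the upper 32 bits are
  // checked against the hole NaN pattern.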
2354 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2355 __ fcmpu(input_reg, input_reg);
2356 EmitFalseBranch(instr, ordered);
2357
2358 Register scratch = scratch0();
2359 __ MovDoubleHighToInt(scratch, input_reg);
2360 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
2361 EmitBranch(instr, eq);
2362 }
2363
2364
2365 Condition LCodeGen::EmitIsString(Register input, Register temp1,
2366 Label* is_not_string,
2367 SmiCheck check_needed = INLINE_SMI_CHECK) {
2368 if (check_needed == INLINE_SMI_CHECK) {
2369 __ JumpIfSmi(input, is_not_string);
2370 }
2371 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2372
2373 return lt;
2374 }
2375
2376
2377 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2378 Register reg = ToRegister(instr->value());
2379 Register temp1 = ToRegister(instr->temp());
2380
2381 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2382 ? OMIT_SMI_CHECK
2383 : INLINE_SMI_CHECK;
2384 Condition true_cond =
2385 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2386
2387 EmitBranch(instr, true_cond);
2388 }
2389
2390
2391 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2392 Register input_reg = EmitLoadRegister(instr->value(), ip);
2393 __ TestIfSmi(input_reg, r0);
2394 EmitBranch(instr, eq, cr0);
2395 }
2396
2397
2398 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2399 Register input = ToRegister(instr->value());
2400 Register temp = ToRegister(instr->temp());
2401
2402 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2403 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2404 }
2405 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2406 __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2407 __ TestBit(temp, Map::kIsUndetectable, r0);
2408 EmitBranch(instr, ne, cr0);
2409 }
2410
2411
2412 static Condition ComputeCompareCondition(Token::Value op) {
2413 switch (op) {
2414 case Token::EQ_STRICT:
2415 case Token::EQ:
2416 return eq;
2417 case Token::LT:
2418 return lt;
2419 case Token::GT:
2420 return gt;
2421 case Token::LTE:
2422 return le;
2423 case Token::GTE:
2424 return ge;
2425 default:
2426 UNREACHABLE();
2427 return kNoCondition;
2428 }
2429 }
2430
2431
2432 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2433 DCHECK(ToRegister(instr->context()).is(cp));
2434 DCHECK(ToRegister(instr->left()).is(r4));
2435 DCHECK(ToRegister(instr->right()).is(r3));
2436
2437 Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2438 CallCode(code, RelocInfo::CODE_TARGET, instr);
2439 __ CompareRoot(r3, Heap::kTrueValueRootIndex);
2440 EmitBranch(instr, eq);
2441 }
2442
2443
2444 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2445 InstanceType from = instr->from();
2446 InstanceType to = instr->to();
2447 if (from == FIRST_TYPE) return to;
2448 DCHECK(from == to || to == LAST_TYPE);
2449 return from;
2450 }
2451
2452
2453 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2454 InstanceType from = instr->from();
2455 InstanceType to = instr->to();
2456 if (from == to) return eq;
2457 if (to == LAST_TYPE) return ge;
2458 if (from == FIRST_TYPE) return le;
2459 UNREACHABLE();
2460 return eq;
2461 }
2462
2463
2464 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2465 Register scratch = scratch0();
2466 Register input = ToRegister(instr->value());
2467
2468 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2469 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2470 }
2471
2472 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2473 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2474 }
2475
2476 // Branches to a label or falls through with the answer in flags. Trashes
2477 // the temp registers, but not the input.
2478 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
2479 Handle<String> class_name, Register input,
2480 Register temp, Register temp2) {
2481 DCHECK(!input.is(temp));
2482 DCHECK(!input.is(temp2));
2483 DCHECK(!temp.is(temp2));
2484
2485 __ JumpIfSmi(input, is_false);
2486
2487 __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
2488 STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2489 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2490 __ bge(is_true);
2491 } else {
2492 __ bge(is_false);
2493 }
2494
2495 // Check if the constructor in the map is a function.
2496 Register instance_type = ip;
2497 __ GetMapConstructor(temp, temp, temp2, instance_type);
2498
2499 // Objects with a non-function constructor have class 'Object'.
2500 __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
2501 if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
2502 __ bne(is_true);
2503 } else {
2504 __ bne(is_false);
2505 }
2506
2507 // temp now contains the constructor function. Grab the
2508 // instance class name from there.
2509 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2510 __ LoadP(temp,
2511 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
2512 // The class name we are testing against is internalized since it's a literal.
2513 // The name in the constructor is internalized because of the way the context
2514 // is booted. This routine isn't expected to work for random API-created
2515 // classes and it doesn't have to because you can't access it with natives
2516 // syntax. Since both sides are internalized it is sufficient to use an
2517 // identity comparison.
2518 __ Cmpi(temp, Operand(class_name), r0);
2519 // End with the answer in flags.
2520 }
2521
2522
2523 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2524 Register input = ToRegister(instr->value());
2525 Register temp = scratch0();
2526 Register temp2 = ToRegister(instr->temp());
2527 Handle<String> class_name = instr->hydrogen()->class_name();
2528
2529 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2530 class_name, input, temp, temp2);
2531
2532 EmitBranch(instr, eq);
2533 }
2534
2535
2536 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2537 Register reg = ToRegister(instr->value());
2538 Register temp = ToRegister(instr->temp());
2539
2540 __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2541 __ Cmpi(temp, Operand(instr->map()), r0);
2542 EmitBranch(instr, eq);
2543 }
2544
2545
2546 void LCodeGen::DoHasInPrototypeChainAndBranch(
2547 LHasInPrototypeChainAndBranch* instr) {
2548 Register const object = ToRegister(instr->object());
2549 Register const object_map = scratch0();
2550 Register const object_instance_type = ip;
2551 Register const object_prototype = object_map;
2552 Register const prototype = ToRegister(instr->prototype());
2553
2554 // The {object} must be a spec object. It's sufficient to know that {object}
2555 // is not a smi, since all other non-spec objects have {null} prototypes and
2556 // will be ruled out below.
2557 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2558 __ TestIfSmi(object, r0);
2559 EmitFalseBranch(instr, eq, cr0);
2560 }
2561
2562 // Loop through the {object}s prototype chain looking for the {prototype}.
2563 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2564 Label loop;
2565 __ bind(&loop);
2566
2567 // Deoptimize if the object needs to be access checked.
2568 __ lbz(object_instance_type,
2569 FieldMemOperand(object_map, Map::kBitFieldOffset));
2570 __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
2571 DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
2572 // Deoptimize for proxies.
2573 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
2574 DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
2575 __ LoadP(object_prototype,
2576 FieldMemOperand(object_map, Map::kPrototypeOffset));
2577 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2578 EmitFalseBranch(instr, eq);
2579 __ cmp(object_prototype, prototype);
2580 EmitTrueBranch(instr, eq);
2581 __ LoadP(object_map,
2582 FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2583 __ b(&loop);
2584 }
2585
2586
2587 void LCodeGen::DoCmpT(LCmpT* instr) {
2588 DCHECK(ToRegister(instr->context()).is(cp));
2589 Token::Value op = instr->op();
2590
2591 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2592 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2593 // This instruction also signals no smi code inlined
2594 __ cmpi(r3, Operand::Zero());
2595
2596 Condition condition = ComputeCompareCondition(op);
2597 if (CpuFeatures::IsSupported(ISELECT)) {
2598 __ LoadRoot(r4, Heap::kTrueValueRootIndex);
2599 __ LoadRoot(r5, Heap::kFalseValueRootIndex);
2600 __ isel(condition, ToRegister(instr->result()), r4, r5);
2601 } else {
2602 Label true_value, done;
2603
2604 __ b(condition, &true_value);
2605
2606 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2607 __ b(&done);
2608
2609 __ bind(&true_value);
2610 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2611
2612 __ bind(&done);
2613 }
2614 }
2615
2616
2617 void LCodeGen::DoReturn(LReturn* instr) {
2618 if (FLAG_trace && info()->IsOptimizing()) {
2619 // Push the return value on the stack as the parameter.
2620     // Runtime::TraceExit returns its parameter in r3. Since we're leaving
2621     // the code managed by the register allocator and tearing down the frame,
2622     // it's safe to write to the context register.
2623 __ push(r3);
2624 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2625 __ CallRuntime(Runtime::kTraceExit);
2626 }
2627 if (info()->saves_caller_doubles()) {
2628 RestoreCallerDoubles();
2629 }
2630 if (instr->has_constant_parameter_count()) {
2631 int parameter_count = ToInteger32(instr->constant_parameter_count());
2632 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
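    // The extra slot accounts for the receiver.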
2633 if (NeedsEagerFrame()) {
2634 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
2635 } else if (sp_delta != 0) {
2636 __ addi(sp, sp, Operand(sp_delta));
2637 }
2638 } else {
2639 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2640 Register reg = ToRegister(instr->parameter_count());
2641 // The argument count parameter is a smi
2642 if (NeedsEagerFrame()) {
2643 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2644 }
2645 __ SmiToPtrArrayOffset(r0, reg);
2646 __ add(sp, sp, r0);
2647 }
2648
2649 __ blr();
2650 }
2651
2652
2653 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2654 Register context = ToRegister(instr->context());
2655 Register result = ToRegister(instr->result());
2656 __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
2657 if (instr->hydrogen()->RequiresHoleCheck()) {
2658 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2659 if (instr->hydrogen()->DeoptimizesOnHole()) {
2660 __ cmp(result, ip);
2661 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2662 } else {
2663 if (CpuFeatures::IsSupported(ISELECT)) {
2664 Register scratch = scratch0();
2665 __ mov(scratch, Operand(factory()->undefined_value()));
2666 __ cmp(result, ip);
2667 __ isel(eq, result, scratch, result);
2668 } else {
2669 Label skip;
2670 __ cmp(result, ip);
2671 __ bne(&skip);
2672 __ mov(result, Operand(factory()->undefined_value()));
2673 __ bind(&skip);
2674 }
2675 }
2676 }
2677 }
2678
2679
2680 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2681 Register context = ToRegister(instr->context());
2682 Register value = ToRegister(instr->value());
2683 Register scratch = scratch0();
2684 MemOperand target = ContextMemOperand(context, instr->slot_index());
2685
2686 Label skip_assignment;
2687
2688 if (instr->hydrogen()->RequiresHoleCheck()) {
2689 __ LoadP(scratch, target);
2690 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2691 __ cmp(scratch, ip);
2692 if (instr->hydrogen()->DeoptimizesOnHole()) {
2693 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2694 } else {
2695 __ bne(&skip_assignment);
2696 }
2697 }
2698
2699 __ StoreP(value, target, r0);
2700 if (instr->hydrogen()->NeedsWriteBarrier()) {
2701 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2702 ? OMIT_SMI_CHECK
2703 : INLINE_SMI_CHECK;
2704 __ RecordWriteContextSlot(context, target.offset(), value, scratch,
2705 GetLinkRegisterState(), kSaveFPRegs,
2706 EMIT_REMEMBERED_SET, check_needed);
2707 }
2708
2709 __ bind(&skip_assignment);
2710 }
2711
2712
2713 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2714 HObjectAccess access = instr->hydrogen()->access();
2715 int offset = access.offset();
2716 Register object = ToRegister(instr->object());
2717
2718 if (access.IsExternalMemory()) {
2719 Register result = ToRegister(instr->result());
2720 MemOperand operand = MemOperand(object, offset);
2721 __ LoadRepresentation(result, operand, access.representation(), r0);
2722 return;
2723 }
2724
2725 if (instr->hydrogen()->representation().IsDouble()) {
2726 DCHECK(access.IsInobject());
2727 DoubleRegister result = ToDoubleRegister(instr->result());
2728 __ lfd(result, FieldMemOperand(object, offset));
2729 return;
2730 }
2731
2732 Register result = ToRegister(instr->result());
2733 if (!access.IsInobject()) {
2734 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2735 object = result;
2736 }
2737
2738 Representation representation = access.representation();
2739
2740 #if V8_TARGET_ARCH_PPC64
2741 // 64-bit Smi optimization
2742 if (representation.IsSmi() &&
2743 instr->hydrogen()->representation().IsInteger32()) {
2744 // Read int value directly from upper half of the smi.
2745 offset = SmiWordOffset(offset);
2746 representation = Representation::Integer32();
2747 }
2748 #endif
2749
2750 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
2751 r0);
2752 }
2753
2754
2755 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2756 Register scratch = scratch0();
2757 Register function = ToRegister(instr->function());
2758 Register result = ToRegister(instr->result());
2759
2760 // Get the prototype or initial map from the function.
2761 __ LoadP(result,
2762 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2763
2764 // Check that the function has a prototype or an initial map.
2765 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2766 __ cmp(result, ip);
2767 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2768
2769 // If the function does not have an initial map, we're done.
2770 if (CpuFeatures::IsSupported(ISELECT)) {
2771 // Get the prototype from the initial map (optimistic).
2772 __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
2773 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2774 __ isel(eq, result, ip, result);
2775 } else {
2776 Label done;
2777 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2778 __ bne(&done);
2779
2780 // Get the prototype from the initial map.
2781 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2782
2783 // All done.
2784 __ bind(&done);
2785 }
2786 }
2787
2788
2789 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2790 Register result = ToRegister(instr->result());
2791 __ LoadRoot(result, instr->index());
2792 }
2793
2794
2795 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2796 Register arguments = ToRegister(instr->arguments());
2797 Register result = ToRegister(instr->result());
2798 // There are two words between the frame pointer and the last argument.
2799   // Subtracting the index from the length accounts for one; add one for the other.
2800 if (instr->length()->IsConstantOperand()) {
2801 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2802 if (instr->index()->IsConstantOperand()) {
2803 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2804 int index = (const_length - const_index) + 1;
2805 __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
2806 } else {
2807 Register index = ToRegister(instr->index());
2808 __ subfic(result, index, Operand(const_length + 1));
2809 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
2810 __ LoadPX(result, MemOperand(arguments, result));
2811 }
2812 } else if (instr->index()->IsConstantOperand()) {
2813 Register length = ToRegister(instr->length());
2814 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2815 int loc = const_index - 1;
2816 if (loc != 0) {
2817 __ subi(result, length, Operand(loc));
2818 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
2819 __ LoadPX(result, MemOperand(arguments, result));
2820 } else {
2821 __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
2822 __ LoadPX(result, MemOperand(arguments, result));
2823 }
2824 } else {
2825 Register length = ToRegister(instr->length());
2826 Register index = ToRegister(instr->index());
2827 __ sub(result, length, index);
2828 __ addi(result, result, Operand(1));
2829 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
2830 __ LoadPX(result, MemOperand(arguments, result));
2831 }
2832 }
2833
2834
2835 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2836 Register external_pointer = ToRegister(instr->elements());
2837 Register key = no_reg;
2838 ElementsKind elements_kind = instr->elements_kind();
2839 bool key_is_constant = instr->key()->IsConstantOperand();
2840 int constant_key = 0;
2841 if (key_is_constant) {
2842 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2843 if (constant_key & 0xF0000000) {
2844 Abort(kArrayIndexConstantValueTooBig);
2845 }
2846 } else {
2847 key = ToRegister(instr->key());
2848 }
2849 int element_size_shift = ElementsKindToShiftSize(elements_kind);
2850 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
2851 int base_offset = instr->base_offset();
2852
2853 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2854 DoubleRegister result = ToDoubleRegister(instr->result());
2855 if (key_is_constant) {
2856 __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
2857 r0);
2858 } else {
2859 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
2860 __ add(scratch0(), external_pointer, r0);
2861 }
2862 if (elements_kind == FLOAT32_ELEMENTS) {
2863 __ lfs(result, MemOperand(scratch0(), base_offset));
2864     } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
2865 __ lfd(result, MemOperand(scratch0(), base_offset));
2866 }
2867 } else {
2868 Register result = ToRegister(instr->result());
2869 MemOperand mem_operand =
2870 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
2871 constant_key, element_size_shift, base_offset);
2872 switch (elements_kind) {
2873 case INT8_ELEMENTS:
2874 if (key_is_constant) {
2875 __ LoadByte(result, mem_operand, r0);
2876 } else {
2877 __ lbzx(result, mem_operand);
2878 }
2879 __ extsb(result, result);
2880 break;
2881 case UINT8_ELEMENTS:
2882 case UINT8_CLAMPED_ELEMENTS:
2883 if (key_is_constant) {
2884 __ LoadByte(result, mem_operand, r0);
2885 } else {
2886 __ lbzx(result, mem_operand);
2887 }
2888 break;
2889 case INT16_ELEMENTS:
2890 if (key_is_constant) {
2891 __ LoadHalfWordArith(result, mem_operand, r0);
2892 } else {
2893 __ lhax(result, mem_operand);
2894 }
2895 break;
2896 case UINT16_ELEMENTS:
2897 if (key_is_constant) {
2898 __ LoadHalfWord(result, mem_operand, r0);
2899 } else {
2900 __ lhzx(result, mem_operand);
2901 }
2902 break;
2903 case INT32_ELEMENTS:
2904 if (key_is_constant) {
2905 __ LoadWordArith(result, mem_operand, r0);
2906 } else {
2907 __ lwax(result, mem_operand);
2908 }
2909 break;
2910 case UINT32_ELEMENTS:
2911 if (key_is_constant) {
2912 __ LoadWord(result, mem_operand, r0);
2913 } else {
2914 __ lwzx(result, mem_operand);
2915 }
2916 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
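          // Without the Uint32 flag the value is used as an int32, so deopt if
          // the sign bit of the loaded word is set.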
2917 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
2918 __ cmplw(result, r0);
2919 DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
2920 }
2921 break;
2922 case FLOAT32_ELEMENTS:
2923 case FLOAT64_ELEMENTS:
2924 case FAST_HOLEY_DOUBLE_ELEMENTS:
2925 case FAST_HOLEY_ELEMENTS:
2926 case FAST_HOLEY_SMI_ELEMENTS:
2927 case FAST_DOUBLE_ELEMENTS:
2928 case FAST_ELEMENTS:
2929 case FAST_SMI_ELEMENTS:
2930 case DICTIONARY_ELEMENTS:
2931 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2932 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2933 case FAST_STRING_WRAPPER_ELEMENTS:
2934 case SLOW_STRING_WRAPPER_ELEMENTS:
2935 case NO_ELEMENTS:
2936 UNREACHABLE();
2937 break;
2938 }
2939 }
2940 }
2941
2942
2943 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2944 Register elements = ToRegister(instr->elements());
2945 bool key_is_constant = instr->key()->IsConstantOperand();
2946 Register key = no_reg;
2947 DoubleRegister result = ToDoubleRegister(instr->result());
2948 Register scratch = scratch0();
2949
2950 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2951 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
2952 int constant_key = 0;
2953 if (key_is_constant) {
2954 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2955 if (constant_key & 0xF0000000) {
2956 Abort(kArrayIndexConstantValueTooBig);
2957 }
2958 } else {
2959 key = ToRegister(instr->key());
2960 }
2961
2962 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
2963 if (!key_is_constant) {
2964 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
2965 __ add(scratch, elements, r0);
2966 elements = scratch;
2967 }
2968 if (!is_int16(base_offset)) {
2969 __ Add(scratch, elements, base_offset, r0);
2970 base_offset = 0;
2971 elements = scratch;
2972 }
2973 __ lfd(result, MemOperand(elements, base_offset));
2974
2975 if (instr->hydrogen()->RequiresHoleCheck()) {
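    // The hole is stored as a NaN with a distinctive upper word, so loading
    // just the exponent word is enough to detect it.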
2976 if (is_int16(base_offset + Register::kExponentOffset)) {
2977 __ lwz(scratch,
2978 MemOperand(elements, base_offset + Register::kExponentOffset));
2979 } else {
2980 __ addi(scratch, elements, Operand(base_offset));
2981 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
2982 }
2983 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
2984 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2985 }
2986 }
2987
2988
2989 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2990 HLoadKeyed* hinstr = instr->hydrogen();
2991 Register elements = ToRegister(instr->elements());
2992 Register result = ToRegister(instr->result());
2993 Register scratch = scratch0();
2994 Register store_base = scratch;
2995 int offset = instr->base_offset();
2996
2997 if (instr->key()->IsConstantOperand()) {
2998 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2999 offset += ToInteger32(const_operand) * kPointerSize;
3000 store_base = elements;
3001 } else {
3002 Register key = ToRegister(instr->key());
3003 // Even though the HLoadKeyed instruction forces the input
3004 // representation for the key to be an integer, the input gets replaced
3005 // during bound check elimination with the index argument to the bounds
3006 // check, which can be tagged, so that case must be handled here, too.
3007 if (hinstr->key()->representation().IsSmi()) {
3008 __ SmiToPtrArrayOffset(r0, key);
3009 } else {
3010 __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
3011 }
3012 __ add(scratch, elements, r0);
3013 }
3014
3015 bool requires_hole_check = hinstr->RequiresHoleCheck();
3016 Representation representation = hinstr->representation();
3017
3018 #if V8_TARGET_ARCH_PPC64
3019 // 64-bit Smi optimization
3020 if (representation.IsInteger32() &&
3021 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3022 DCHECK(!requires_hole_check);
3023 // Read int value directly from upper half of the smi.
3024 offset = SmiWordOffset(offset);
3025 }
3026 #endif
3027
3028 __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
3029 r0);
3030
3031 // Check for the hole value.
3032 if (requires_hole_check) {
3033 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3034 __ TestIfSmi(result, r0);
3035 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
3036 } else {
3037 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3038 __ cmp(result, scratch);
3039 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
3040 }
3041 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3042 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3043 Label done;
3044 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3045 __ cmp(result, scratch);
3046 __ bne(&done);
3047 if (info()->IsStub()) {
3048 // A stub can safely convert the hole to undefined only if the array
3049 // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
3050 // it needs to bail out.
3051 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3052 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
3053 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
3054 DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
3055 }
3056 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3057 __ bind(&done);
3058 }
3059 }
3060
3061
3062 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3063 if (instr->is_fixed_typed_array()) {
3064 DoLoadKeyedExternalArray(instr);
3065 } else if (instr->hydrogen()->representation().IsDouble()) {
3066 DoLoadKeyedFixedDoubleArray(instr);
3067 } else {
3068 DoLoadKeyedFixedArray(instr);
3069 }
3070 }
3071
3072
3073 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
3074 bool key_is_constant, bool key_is_smi,
3075 int constant_key,
3076 int element_size_shift,
3077 int base_offset) {
3078 Register scratch = scratch0();
3079
3080 if (key_is_constant) {
3081 return MemOperand(base, (constant_key << element_size_shift) + base_offset);
3082 }
3083
3084 bool needs_shift =
3085 (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
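  // A smi key is already shifted left by (kSmiTagSize + kSmiShiftSize); when
  // that matches the element size shift it can be used as the offset directly.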
3086
3087 if (!(base_offset || needs_shift)) {
3088 return MemOperand(base, key);
3089 }
3090
3091 if (needs_shift) {
3092 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3093 key = scratch;
3094 }
3095
3096 if (base_offset) {
3097 __ Add(scratch, key, base_offset, r0);
3098 }
3099
3100 return MemOperand(base, scratch);
3101 }
3102
3103
3104 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3105 Register scratch = scratch0();
3106 Register result = ToRegister(instr->result());
3107
3108 if (instr->hydrogen()->from_inlined()) {
3109 __ subi(result, sp, Operand(2 * kPointerSize));
3110 } else if (instr->hydrogen()->arguments_adaptor()) {
3111 // Check if the calling frame is an arguments adaptor frame.
3112 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3113 __ LoadP(
3114 result,
3115 MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
3116 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3117
3118 // Result is the frame pointer for the frame if not adapted and for the real
3119 // frame below the adaptor frame if adapted.
3120 if (CpuFeatures::IsSupported(ISELECT)) {
3121 __ isel(eq, result, scratch, fp);
3122 } else {
3123 Label done, adapted;
3124 __ beq(&adapted);
3125 __ mr(result, fp);
3126 __ b(&done);
3127
3128 __ bind(&adapted);
3129 __ mr(result, scratch);
3130 __ bind(&done);
3131 }
3132 } else {
3133 __ mr(result, fp);
3134 }
3135 }
3136
3137
3138 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3139 Register elem = ToRegister(instr->elements());
3140 Register result = ToRegister(instr->result());
3141
3142 Label done;
3143
3144   // If there is no arguments adaptor frame, the number of arguments is fixed.
3145 __ cmp(fp, elem);
3146 __ mov(result, Operand(scope()->num_parameters()));
3147 __ beq(&done);
3148
3149 // Arguments adaptor frame present. Get argument length from there.
3150 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3151 __ LoadP(result,
3152 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3153 __ SmiUntag(result);
3154
3155 // Argument length is in result register.
3156 __ bind(&done);
3157 }
3158
3159
3160 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3161 Register receiver = ToRegister(instr->receiver());
3162 Register function = ToRegister(instr->function());
3163 Register result = ToRegister(instr->result());
3164 Register scratch = scratch0();
3165
3166 // If the receiver is null or undefined, we have to pass the global
3167 // object as a receiver to normal functions. Values have to be
3168 // passed unchanged to builtins and strict-mode functions.
3169 Label global_object, result_in_receiver;
3170
3171 if (!instr->hydrogen()->known_function()) {
3172 // Do not transform the receiver to object for strict mode
3173 // functions or builtins.
3174 __ LoadP(scratch,
3175 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3176 __ lwz(scratch,
3177 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3178 __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
3179 (1 << SharedFunctionInfo::kNativeBit)));
3180 __ bne(&result_in_receiver, cr0);
3181 }
3182
3183 // Normal function. Replace undefined or null with global receiver.
3184 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3185 __ cmp(receiver, scratch);
3186 __ beq(&global_object);
3187 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3188 __ cmp(receiver, scratch);
3189 __ beq(&global_object);
3190
3191 // Deoptimize if the receiver is not a JS object.
3192 __ TestIfSmi(receiver, r0);
3193 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
3194 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
3195 DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
3196
3197 __ b(&result_in_receiver);
3198 __ bind(&global_object);
3199 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3200 __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3201 __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3202
3203 if (result.is(receiver)) {
3204 __ bind(&result_in_receiver);
3205 } else {
3206 Label result_ok;
3207 __ b(&result_ok);
3208 __ bind(&result_in_receiver);
3209 __ mr(result, receiver);
3210 __ bind(&result_ok);
3211 }
3212 }
3213
3214
3215 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3216 Register receiver = ToRegister(instr->receiver());
3217 Register function = ToRegister(instr->function());
3218 Register length = ToRegister(instr->length());
3219 Register elements = ToRegister(instr->elements());
3220 Register scratch = scratch0();
3221 DCHECK(receiver.is(r3)); // Used for parameter count.
3222 DCHECK(function.is(r4)); // Required by InvokeFunction.
3223 DCHECK(ToRegister(instr->result()).is(r3));
3224
3225 // Copy the arguments to this function possibly from the
3226 // adaptor frame below it.
3227 const uint32_t kArgumentsLimit = 1 * KB;
3228 __ cmpli(length, Operand(kArgumentsLimit));
3229 DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
3230
3231 // Push the receiver and use the register to keep the original
3232 // number of arguments.
3233 __ push(receiver);
3234 __ mr(receiver, length);
3235 // The arguments are at a one pointer size offset from elements.
3236 __ addi(elements, elements, Operand(1 * kPointerSize));
3237
3238 // Loop through the arguments pushing them onto the execution
3239 // stack.
3240 Label invoke, loop;
3241 // length is a small non-negative integer, due to the test above.
3242 __ cmpi(length, Operand::Zero());
3243 __ beq(&invoke);
3244 __ mtctr(length);
3245 __ bind(&loop);
3246 __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
3247 __ LoadPX(scratch, MemOperand(elements, r0));
3248 __ push(scratch);
3249 __ addi(length, length, Operand(-1));
3250 __ bdnz(&loop);
3251
3252 __ bind(&invoke);
3253
3254 InvokeFlag flag = CALL_FUNCTION;
3255 if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
3256 DCHECK(!info()->saves_caller_doubles());
3257 // TODO(ishell): drop current frame before pushing arguments to the stack.
3258 flag = JUMP_FUNCTION;
3259 ParameterCount actual(r3);
3260 // It is safe to use r6, r7 and r8 as scratch registers here given that
3261 // 1) we are not going to return to caller function anyway,
3262 // 2) r6 (new.target) will be initialized below.
3263 PrepareForTailCall(actual, r6, r7, r8);
3264 }
3265
3266 DCHECK(instr->HasPointerMap());
3267 LPointerMap* pointers = instr->pointer_map();
3268 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3269 // The number of arguments is stored in receiver which is r3, as expected
3270 // by InvokeFunction.
3271 ParameterCount actual(receiver);
3272 __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
3273 }
3274
3275
3276 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3277 LOperand* argument = instr->value();
3278 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3279 Abort(kDoPushArgumentNotImplementedForDoubleType);
3280 } else {
3281 Register argument_reg = EmitLoadRegister(argument, ip);
3282 __ push(argument_reg);
3283 }
3284 }
3285
3286
3287 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
3288
3289
3290 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3291 Register result = ToRegister(instr->result());
3292 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3293 }
3294
3295
3296 void LCodeGen::DoContext(LContext* instr) {
3297 // If there is a non-return use, the context must be moved to a register.
3298 Register result = ToRegister(instr->result());
3299 if (info()->IsOptimizing()) {
3300 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3301 } else {
3302 // If there is no frame, the context must be in cp.
3303 DCHECK(result.is(cp));
3304 }
3305 }
3306
3307
3308 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3309 DCHECK(ToRegister(instr->context()).is(cp));
3310 __ Move(scratch0(), instr->hydrogen()->pairs());
3311 __ push(scratch0());
3312 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3313 __ push(scratch0());
3314 __ Move(scratch0(), instr->hydrogen()->feedback_vector());
3315 __ push(scratch0());
3316 CallRuntime(Runtime::kDeclareGlobals, instr);
3317 }
3318
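// Calls a function that is statically known to the compiler. If the arity
// matches the formal parameter count (or the callee does not require
// argument adaptation), the call goes directly through the callee's code
// entry (or the current code object for self-calls); otherwise it falls
// back to the generic InvokeFunction path.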
3319 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3320 int formal_parameter_count, int arity,
3321 bool is_tail_call, LInstruction* instr) {
3322 bool dont_adapt_arguments =
3323 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3324 bool can_invoke_directly =
3325 dont_adapt_arguments || formal_parameter_count == arity;
3326
3327 Register function_reg = r4;
3328
3329 LPointerMap* pointers = instr->pointer_map();
3330
3331 if (can_invoke_directly) {
3332 // Change context.
3333 __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3334
3335 // Always initialize new target and number of actual arguments.
3336 __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
3337 __ mov(r3, Operand(arity));
3338
3339 bool is_self_call = function.is_identical_to(info()->closure());
3340
3341 // Invoke function.
3342 if (is_self_call) {
3343 Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
3344 if (is_tail_call) {
3345 __ Jump(self, RelocInfo::CODE_TARGET);
3346 } else {
3347 __ Call(self, RelocInfo::CODE_TARGET);
3348 }
3349 } else {
3350 __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3351 if (is_tail_call) {
3352 __ JumpToJSEntry(ip);
3353 } else {
3354 __ CallJSEntry(ip);
3355 }
3356 }
3357
3358 if (!is_tail_call) {
3359 // Set up deoptimization.
3360 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3361 }
3362 } else {
3363 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3364 ParameterCount actual(arity);
3365 ParameterCount expected(formal_parameter_count);
3366 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3367 __ InvokeFunction(function_reg, expected, actual, flag, generator);
3368 }
3369 }
3370
3371
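// Deferred path of Math.abs for a tagged input: deoptimizes unless the
// input is a heap number; positive inputs are returned unchanged, while
// negative inputs get a freshly allocated heap number with the sign bit
// cleared.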
3372 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3373 DCHECK(instr->context() != NULL);
3374 DCHECK(ToRegister(instr->context()).is(cp));
3375 Register input = ToRegister(instr->value());
3376 Register result = ToRegister(instr->result());
3377 Register scratch = scratch0();
3378
3379 // Deoptimize if not a heap number.
3380 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3381 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3382 __ cmp(scratch, ip);
3383 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
3384
3385 Label done;
3386 Register exponent = scratch0();
3387 scratch = no_reg;
3388 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3389 // Check the sign of the argument. If the argument is positive, just
3390 // return it.
3391 __ cmpwi(exponent, Operand::Zero());
3392 // Move the input to the result if necessary.
3393 __ Move(result, input);
3394 __ bge(&done);
3395
3396 // Input is negative. Reverse its sign.
3397 // Preserve the value of all registers.
3398 {
3399 PushSafepointRegistersScope scope(this);
3400
3401 // Registers were saved at the safepoint, so we can use
3402 // many scratch registers.
3403 Register tmp1 = input.is(r4) ? r3 : r4;
3404 Register tmp2 = input.is(r5) ? r3 : r5;
3405 Register tmp3 = input.is(r6) ? r3 : r6;
3406 Register tmp4 = input.is(r7) ? r3 : r7;
3407
3408 // exponent: floating point exponent value.
3409
3410 Label allocated, slow;
3411 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3412 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3413 __ b(&allocated);
3414
3415 // Slow case: Call the runtime system to do the number allocation.
3416 __ bind(&slow);
3417
3418 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3419 instr->context());
3420 // Set the pointer to the new heap number in tmp1.
3421 if (!tmp1.is(r3)) __ mr(tmp1, r3);
3422 // Restore input after the call to the runtime.
3423 __ LoadFromSafepointRegisterSlot(input, input);
3424 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3425
3426 __ bind(&allocated);
3427 // exponent: floating point exponent value.
3428 // tmp1: allocated heap number.
3429 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
3430 __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit
3431 __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3432 __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3433 __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3434
3435 __ StoreToSafepointRegisterSlot(tmp1, result);
3436 }
3437
3438 __ bind(&done);
3439 }
3440
3441
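// Math.abs for smi (or pointer-sized integer) inputs held in a general
// purpose register: negates negative values and deoptimizes if the
// negation overflows (i.e. for the minimum representable value).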
3442 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
3443 Register input = ToRegister(instr->value());
3444 Register result = ToRegister(instr->result());
3445 Label done;
3446 __ cmpi(input, Operand::Zero());
3447 __ Move(result, input);
3448 __ bge(&done);
3449 __ li(r0, Operand::Zero()); // clear xer
3450 __ mtxer(r0);
3451 __ neg(result, result, SetOE, SetRC);
3452 // Deoptimize on overflow.
3453 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
3454 __ bind(&done);
3455 }
3456
3457
3458 #if V8_TARGET_ARCH_PPC64
3459 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
3460 Register input = ToRegister(instr->value());
3461 Register result = ToRegister(instr->result());
3462 Label done;
3463 __ cmpwi(input, Operand::Zero());
3464 __ Move(result, input);
3465 __ bge(&done);
3466
3467 // Deoptimize on overflow.
3468 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3469 __ cmpw(input, r0);
3470 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
3471
3472 __ neg(result, result);
3473 __ bind(&done);
3474 }
3475 #endif
3476
3477
3478 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3479 // Class for deferred case.
3480 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3481 public:
3482 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3483 : LDeferredCode(codegen), instr_(instr) {}
3484 void Generate() override {
3485 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3486 }
3487 LInstruction* instr() override { return instr_; }
3488
3489 private:
3490 LMathAbs* instr_;
3491 };
3492
3493 Representation r = instr->hydrogen()->value()->representation();
3494 if (r.IsDouble()) {
3495 DoubleRegister input = ToDoubleRegister(instr->value());
3496 DoubleRegister result = ToDoubleRegister(instr->result());
3497 __ fabs(result, input);
3498 #if V8_TARGET_ARCH_PPC64
3499 } else if (r.IsInteger32()) {
3500 EmitInteger32MathAbs(instr);
3501 } else if (r.IsSmi()) {
3502 #else
3503 } else if (r.IsSmiOrInteger32()) {
3504 #endif
3505 EmitMathAbs(instr);
3506 } else {
3507 // Representation is tagged.
3508 DeferredMathAbsTaggedHeapNumber* deferred =
3509 new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3510 Register input = ToRegister(instr->value());
3511 // Smi check.
3512 __ JumpIfNotSmi(input, deferred->entry());
3513 // If smi, handle it directly.
3514 EmitMathAbs(instr);
3515 __ bind(deferred->exit());
3516 }
3517 }
3518
3519 void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
3520 DoubleRegister input_reg = ToDoubleRegister(instr->value());
3521 DoubleRegister output_reg = ToDoubleRegister(instr->result());
3522 __ frim(output_reg, input_reg);
3523 }
3524
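// Math.floor producing an untagged int32 result. Deoptimizes when the
// floor cannot be computed as an int32 (lost precision or NaN) and, when
// requested, when the result would be -0.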
3525 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
3526 DoubleRegister input = ToDoubleRegister(instr->value());
3527 Register result = ToRegister(instr->result());
3528 Register input_high = scratch0();
3529 Register scratch = ip;
3530 Label done, exact;
3531
3532 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
3533 &exact);
3534 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3535
3536 __ bind(&exact);
3537 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3538 // Test for -0.
3539 __ cmpi(result, Operand::Zero());
3540 __ bne(&done);
3541 __ cmpwi(input_high, Operand::Zero());
3542 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
3543 }
3544 __ bind(&done);
3545 }
3546
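// Math.round producing a double result. frin rounds ties away from zero,
// which matches the required round-half-up behaviour for non-negative
// inputs; negative non-integral inputs are instead floored after adding
// 0.5, with the sign forced so that values in [-0.5, -0.0[ yield -0.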
3547 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
3548 DoubleRegister input_reg = ToDoubleRegister(instr->value());
3549 DoubleRegister output_reg = ToDoubleRegister(instr->result());
3550 DoubleRegister dot_five = double_scratch0();
3551 Label done;
3552
3553 __ frin(output_reg, input_reg);
3554 __ fcmpu(input_reg, kDoubleRegZero);
3555 __ bge(&done);
3556 __ fcmpu(output_reg, input_reg);
3557 __ beq(&done);
3558
3559 // Negative, non-integer case
3560 __ LoadDoubleLiteral(dot_five, 0.5, r0);
3561 __ fadd(output_reg, input_reg, dot_five);
3562 __ frim(output_reg, output_reg);
3563 // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
3564 __ fabs(output_reg, output_reg);
3565 __ fneg(output_reg, output_reg);
3566
3567 __ bind(&done);
3568 }
3569
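// Math.round producing an untagged int32 result. Inputs with an absolute
// value of at most 0.5 are handled inline (0 or 1, with an optional -0
// bailout); larger inputs are floored after adding 0.5. Deoptimizes on
// NaN or when the result does not fit in an int32.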
3570 void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
3571 DoubleRegister input = ToDoubleRegister(instr->value());
3572 Register result = ToRegister(instr->result());
3573 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3574 DoubleRegister input_plus_dot_five = double_scratch1;
3575 Register scratch1 = scratch0();
3576 Register scratch2 = ip;
3577 DoubleRegister dot_five = double_scratch0();
3578 Label convert, done;
3579
3580 __ LoadDoubleLiteral(dot_five, 0.5, r0);
3581 __ fabs(double_scratch1, input);
3582 __ fcmpu(double_scratch1, dot_five);
3583 DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3584 // If input is in [-0.5, -0], the result is -0.
3585 // If input is in [+0, +0.5[, the result is +0.
3586 // If the input is +0.5, the result is 1.
3587 __ bgt(&convert); // Out of [-0.5, +0.5].
3588 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3589 // [-0.5, -0] (negative) yields minus zero.
3590 __ TestDoubleSign(input, scratch1);
3591 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
3592 }
3593 __ fcmpu(input, dot_five);
3594 if (CpuFeatures::IsSupported(ISELECT)) {
3595 __ li(result, Operand(1));
3596 __ isel(lt, result, r0, result);
3597 __ b(&done);
3598 } else {
3599 Label return_zero;
3600 __ bne(&return_zero);
3601 __ li(result, Operand(1)); // +0.5.
3602 __ b(&done);
3603 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3604 // flag kBailoutOnMinusZero.
3605 __ bind(&return_zero);
3606 __ li(result, Operand::Zero());
3607 __ b(&done);
3608 }
3609
3610 __ bind(&convert);
3611 __ fadd(input_plus_dot_five, input, dot_five);
3612 // Reuse dot_five (double_scratch0) as we no longer need this value.
3613 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
3614 double_scratch0(), &done, &done);
3615 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3616 __ bind(&done);
3617 }
3618
3619
3620 void LCodeGen::DoMathFround(LMathFround* instr) {
3621 DoubleRegister input_reg = ToDoubleRegister(instr->value());
3622 DoubleRegister output_reg = ToDoubleRegister(instr->result());
3623 __ frsp(output_reg, input_reg);
3624 }
3625
3626
3627 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3628 DoubleRegister input = ToDoubleRegister(instr->value());
3629 DoubleRegister result = ToDoubleRegister(instr->result());
3630 __ fsqrt(result, input);
3631 }
3632
3633
3634 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3635 DoubleRegister input = ToDoubleRegister(instr->value());
3636 DoubleRegister result = ToDoubleRegister(instr->result());
3637 DoubleRegister temp = double_scratch0();
3638
3639 // Note that according to ECMA-262 15.8.2.13:
3640 // Math.pow(-Infinity, 0.5) == Infinity
3641 // Math.sqrt(-Infinity) == NaN
3642 Label skip, done;
3643
3644 __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
3645 __ fcmpu(input, temp);
3646 __ bne(&skip);
3647 __ fneg(result, temp);
3648 __ b(&done);
3649
3650 // Add +0 to convert -0 to +0.
3651 __ bind(&skip);
3652 __ fadd(result, input, kDoubleRegZero);
3653 __ fsqrt(result, result);
3654 __ bind(&done);
3655 }
3656
3657
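// Math.pow: dispatches to the MathPowStub variant selected by the
// exponent's representation (smi, tagged, int32 or double). A tagged
// exponent that is neither a smi nor a heap number deoptimizes.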
3658 void LCodeGen::DoPower(LPower* instr) {
3659 Representation exponent_type = instr->hydrogen()->right()->representation();
3660 // Having marked this as a call, we can use any registers.
3661 // Just make sure that the input/output registers are the expected ones.
3662 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3663 DCHECK(!instr->right()->IsDoubleRegister() ||
3664 ToDoubleRegister(instr->right()).is(d2));
3665 DCHECK(!instr->right()->IsRegister() ||
3666 ToRegister(instr->right()).is(tagged_exponent));
3667 DCHECK(ToDoubleRegister(instr->left()).is(d1));
3668 DCHECK(ToDoubleRegister(instr->result()).is(d3));
3669
3670 if (exponent_type.IsSmi()) {
3671 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3672 __ CallStub(&stub);
3673 } else if (exponent_type.IsTagged()) {
3674 Label no_deopt;
3675 __ JumpIfSmi(tagged_exponent, &no_deopt);
3676 DCHECK(!r10.is(tagged_exponent));
3677 __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3678 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3679 __ cmp(r10, ip);
3680 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
3681 __ bind(&no_deopt);
3682 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3683 __ CallStub(&stub);
3684 } else if (exponent_type.IsInteger32()) {
3685 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3686 __ CallStub(&stub);
3687 } else {
3688 DCHECK(exponent_type.IsDouble());
3689 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3690 __ CallStub(&stub);
3691 }
3692 }
3693
3694 void LCodeGen::DoMathCos(LMathCos* instr) {
3695 __ PrepareCallCFunction(0, 1, scratch0());
3696 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3697 __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
3698 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3699 }
3700
3701 void LCodeGen::DoMathSin(LMathSin* instr) {
3702 __ PrepareCallCFunction(0, 1, scratch0());
3703 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3704 __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
3705 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3706 }
3707
3708 void LCodeGen::DoMathExp(LMathExp* instr) {
3709 __ PrepareCallCFunction(0, 1, scratch0());
3710 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3711 __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
3712 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3713 }
3714
3715 void LCodeGen::DoMathLog(LMathLog* instr) {
3716 __ PrepareCallCFunction(0, 1, scratch0());
3717 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3718 __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
3719 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3720 }
3721
3722 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3723 Register input = ToRegister(instr->value());
3724 Register result = ToRegister(instr->result());
3725 __ cntlzw_(result, input);
3726 }
3727
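// Prepares the stack for a tail call: if the caller sits on top of an
// arguments adaptor frame, that frame is dropped and its argument count
// is used; otherwise the caller's formal parameter count is used.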
3728 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
3729 Register scratch1, Register scratch2,
3730 Register scratch3) {
3731 #if DEBUG
3732 if (actual.is_reg()) {
3733 DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
3734 } else {
3735 DCHECK(!AreAliased(scratch1, scratch2, scratch3));
3736 }
3737 #endif
3738 if (FLAG_code_comments) {
3739 if (actual.is_reg()) {
3740 Comment(";;; PrepareForTailCall, actual: %s {",
3741 RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
3742 actual.reg().code()));
3743 } else {
3744 Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
3745 }
3746 }
3747
3748 // Check if next frame is an arguments adaptor frame.
3749 Register caller_args_count_reg = scratch1;
3750 Label no_arguments_adaptor, formal_parameter_count_loaded;
3751 __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3752 __ LoadP(scratch3,
3753 MemOperand(scratch2, StandardFrameConstants::kContextOffset));
3754 __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3755 __ bne(&no_arguments_adaptor);
3756
3757 // Drop current frame and load arguments count from arguments adaptor frame.
3758 __ mr(fp, scratch2);
3759 __ LoadP(caller_args_count_reg,
3760 MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
3761 __ SmiUntag(caller_args_count_reg);
3762 __ b(&formal_parameter_count_loaded);
3763
3764 __ bind(&no_arguments_adaptor);
3765 // Load caller's formal parameter count
3766 __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
3767
3768 __ bind(&formal_parameter_count_loaded);
3769 __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
3770
3771 Comment(";;; }");
3772 }
3773
3774 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3775 HInvokeFunction* hinstr = instr->hydrogen();
3776 DCHECK(ToRegister(instr->context()).is(cp));
3777 DCHECK(ToRegister(instr->function()).is(r4));
3778 DCHECK(instr->HasPointerMap());
3779
3780 bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
3781
3782 if (is_tail_call) {
3783 DCHECK(!info()->saves_caller_doubles());
3784 ParameterCount actual(instr->arity());
3785 // It is safe to use r6, r7 and r8 as scratch registers here given that
3786 // 1) we are not going to return to caller function anyway,
3787 // 2) r6 (new.target) will be initialized below.
3788 PrepareForTailCall(actual, r6, r7, r8);
3789 }
3790
3791 Handle<JSFunction> known_function = hinstr->known_function();
3792 if (known_function.is_null()) {
3793 LPointerMap* pointers = instr->pointer_map();
3794 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3795 ParameterCount actual(instr->arity());
3796 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3797 __ InvokeFunction(r4, no_reg, actual, flag, generator);
3798 } else {
3799 CallKnownFunction(known_function, hinstr->formal_parameter_count(),
3800 instr->arity(), is_tail_call, instr);
3801 }
3802 }
3803
3804
3805 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3806 DCHECK(ToRegister(instr->result()).is(r3));
3807
3808 if (instr->hydrogen()->IsTailCall()) {
3809 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3810
3811 if (instr->target()->IsConstantOperand()) {
3812 LConstantOperand* target = LConstantOperand::cast(instr->target());
3813 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3814 __ Jump(code, RelocInfo::CODE_TARGET);
3815 } else {
3816 DCHECK(instr->target()->IsRegister());
3817 Register target = ToRegister(instr->target());
3818 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3819 __ JumpToJSEntry(ip);
3820 }
3821 } else {
3822 LPointerMap* pointers = instr->pointer_map();
3823 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3824
3825 if (instr->target()->IsConstantOperand()) {
3826 LConstantOperand* target = LConstantOperand::cast(instr->target());
3827 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3828 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3829 __ Call(code, RelocInfo::CODE_TARGET);
3830 } else {
3831 DCHECK(instr->target()->IsRegister());
3832 Register target = ToRegister(instr->target());
3833 generator.BeforeCall(__ CallSize(target));
3834 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3835 __ CallJSEntry(ip);
3836 }
3837 generator.AfterCall();
3838 }
3839 }
3840
3841
3842 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3843 DCHECK(ToRegister(instr->context()).is(cp));
3844 DCHECK(ToRegister(instr->constructor()).is(r4));
3845 DCHECK(ToRegister(instr->result()).is(r3));
3846
3847 __ mov(r3, Operand(instr->arity()));
3848 __ Move(r5, instr->hydrogen()->site());
3849
3850 ElementsKind kind = instr->hydrogen()->elements_kind();
3851 AllocationSiteOverrideMode override_mode =
3852 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3853 ? DISABLE_ALLOCATION_SITES
3854 : DONT_OVERRIDE;
3855
3856 if (instr->arity() == 0) {
3857 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3858 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3859 } else if (instr->arity() == 1) {
3860 Label done;
3861 if (IsFastPackedElementsKind(kind)) {
3862 Label packed_case;
3863 // We might need the holey variant of the elements kind:
3864 // check the first argument (the requested length).
3865 __ LoadP(r8, MemOperand(sp, 0));
3866 __ cmpi(r8, Operand::Zero());
3867 __ beq(&packed_case);
3868
3869 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3870 ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
3871 override_mode);
3872 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3873 __ b(&done);
3874 __ bind(&packed_case);
3875 }
3876
3877 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3878 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3879 __ bind(&done);
3880 } else {
3881 ArrayNArgumentsConstructorStub stub(isolate());
3882 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3883 }
3884 }
3885
3886
3887 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3888 CallRuntime(instr->function(), instr->arity(), instr);
3889 }
3890
3891
3892 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3893 Register function = ToRegister(instr->function());
3894 Register code_object = ToRegister(instr->code_object());
3895 __ addi(code_object, code_object,
3896 Operand(Code::kHeaderSize - kHeapObjectTag));
3897 __ StoreP(code_object,
3898 FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
3899 }
3900
3901
3902 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3903 Register result = ToRegister(instr->result());
3904 Register base = ToRegister(instr->base_object());
3905 if (instr->offset()->IsConstantOperand()) {
3906 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3907 __ Add(result, base, ToInteger32(offset), r0);
3908 } else {
3909 Register offset = ToRegister(instr->offset());
3910 __ add(result, base, offset);
3911 }
3912 }
3913
3914
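// Stores to a named field. Handles external memory, double fields (boxed
// or, on 64-bit targets, unboxed), in-object and out-of-object
// properties, map transitions and the associated write barriers.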
3915 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3916 HStoreNamedField* hinstr = instr->hydrogen();
3917 Representation representation = instr->representation();
3918
3919 Register object = ToRegister(instr->object());
3920 Register scratch = scratch0();
3921 HObjectAccess access = hinstr->access();
3922 int offset = access.offset();
3923
3924 if (access.IsExternalMemory()) {
3925 Register value = ToRegister(instr->value());
3926 MemOperand operand = MemOperand(object, offset);
3927 __ StoreRepresentation(value, operand, representation, r0);
3928 return;
3929 }
3930
3931 __ AssertNotSmi(object);
3932
3933 #if V8_TARGET_ARCH_PPC64
3934 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
3935 IsInteger32(LConstantOperand::cast(instr->value())));
3936 #else
3937 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
3938 IsSmi(LConstantOperand::cast(instr->value())));
3939 #endif
3940 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
3941 DCHECK(access.IsInobject());
3942 DCHECK(!hinstr->has_transition());
3943 DCHECK(!hinstr->NeedsWriteBarrier());
3944 DoubleRegister value = ToDoubleRegister(instr->value());
3945 __ stfd(value, FieldMemOperand(object, offset));
3946 return;
3947 }
3948
3949 if (hinstr->has_transition()) {
3950 Handle<Map> transition = hinstr->transition_map();
3951 AddDeprecationDependency(transition);
3952 __ mov(scratch, Operand(transition));
3953 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
3954 if (hinstr->NeedsWriteBarrierForMap()) {
3955 Register temp = ToRegister(instr->temp());
3956 // Update the write barrier for the map field.
3957 __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
3958 kSaveFPRegs);
3959 }
3960 }
3961
3962 // Do the store.
3963 Register record_dest = object;
3964 Register record_value = no_reg;
3965 Register record_scratch = scratch;
3966 #if V8_TARGET_ARCH_PPC64
3967 if (FLAG_unbox_double_fields && representation.IsDouble()) {
3968 DCHECK(access.IsInobject());
3969 DoubleRegister value = ToDoubleRegister(instr->value());
3970 __ stfd(value, FieldMemOperand(object, offset));
3971 if (hinstr->NeedsWriteBarrier()) {
3972 record_value = ToRegister(instr->value());
3973 }
3974 } else {
3975 if (representation.IsSmi() &&
3976 hinstr->value()->representation().IsInteger32()) {
3977 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
3978 // 64-bit Smi optimization
3979 // Store int value directly to upper half of the smi.
3980 offset = SmiWordOffset(offset);
3981 representation = Representation::Integer32();
3982 }
3983 #endif
3984 if (access.IsInobject()) {
3985 Register value = ToRegister(instr->value());
3986 MemOperand operand = FieldMemOperand(object, offset);
3987 __ StoreRepresentation(value, operand, representation, r0);
3988 record_value = value;
3989 } else {
3990 Register value = ToRegister(instr->value());
3991 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3992 MemOperand operand = FieldMemOperand(scratch, offset);
3993 __ StoreRepresentation(value, operand, representation, r0);
3994 record_dest = scratch;
3995 record_value = value;
3996 record_scratch = object;
3997 }
3998 #if V8_TARGET_ARCH_PPC64
3999 }
4000 #endif
4001
4002 if (hinstr->NeedsWriteBarrier()) {
4003 __ RecordWriteField(record_dest, offset, record_value, record_scratch,
4004 GetLinkRegisterState(), kSaveFPRegs,
4005 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4006 hinstr->PointersToHereCheckForValue());
4007 }
4008 }
4009
4010
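// Emits an unsigned comparison between index and length (either of which
// may be a constant) and deoptimizes on failure; under --debug-code a
// statically eliminated check is verified with a stop instead.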
4011 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4012 Representation representation = instr->hydrogen()->length()->representation();
4013 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4014 DCHECK(representation.IsSmiOrInteger32());
4015
4016 Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
4017 if (instr->length()->IsConstantOperand()) {
4018 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4019 Register index = ToRegister(instr->index());
4020 if (representation.IsSmi()) {
4021 __ CmplSmiLiteral(index, Smi::FromInt(length), r0);
4022 } else {
4023 __ Cmplwi(index, Operand(length), r0);
4024 }
4025 cc = CommuteCondition(cc);
4026 } else if (instr->index()->IsConstantOperand()) {
4027 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4028 Register length = ToRegister(instr->length());
4029 if (representation.IsSmi()) {
4030 __ CmplSmiLiteral(length, Smi::FromInt(index), r0);
4031 } else {
4032 __ Cmplwi(length, Operand(index), r0);
4033 }
4034 } else {
4035 Register index = ToRegister(instr->index());
4036 Register length = ToRegister(instr->length());
4037 if (representation.IsSmi()) {
4038 __ cmpl(length, index);
4039 } else {
4040 __ cmplw(length, index);
4041 }
4042 }
4043 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4044 Label done;
4045 __ b(NegateCondition(cc), &done);
4046 __ stop("eliminated bounds check failed");
4047 __ bind(&done);
4048 } else {
4049 DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
4050 }
4051 }
4052
4053
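// Keyed store into a typed array backing store. Computes the element
// address from the (constant or register, smi or int32) key and stores a
// value of the width implied by the elements kind, converting to float32
// for FLOAT32_ELEMENTS.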
4054 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4055 Register external_pointer = ToRegister(instr->elements());
4056 Register key = no_reg;
4057 ElementsKind elements_kind = instr->elements_kind();
4058 bool key_is_constant = instr->key()->IsConstantOperand();
4059 int constant_key = 0;
4060 if (key_is_constant) {
4061 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4062 if (constant_key & 0xF0000000) {
4063 Abort(kArrayIndexConstantValueTooBig);
4064 }
4065 } else {
4066 key = ToRegister(instr->key());
4067 }
4068 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4069 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4070 int base_offset = instr->base_offset();
4071
4072 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
4073 Register address = scratch0();
4074 DoubleRegister value(ToDoubleRegister(instr->value()));
4075 if (key_is_constant) {
4076 if (constant_key != 0) {
4077 __ Add(address, external_pointer, constant_key << element_size_shift,
4078 r0);
4079 } else {
4080 address = external_pointer;
4081 }
4082 } else {
4083 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
4084 __ add(address, external_pointer, r0);
4085 }
4086 if (elements_kind == FLOAT32_ELEMENTS) {
4087 __ frsp(double_scratch0(), value);
4088 __ stfs(double_scratch0(), MemOperand(address, base_offset));
4089 } else { // Storing doubles, not floats.
4090 __ stfd(value, MemOperand(address, base_offset));
4091 }
4092 } else {
4093 Register value(ToRegister(instr->value()));
4094 MemOperand mem_operand =
4095 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
4096 constant_key, element_size_shift, base_offset);
4097 switch (elements_kind) {
4098 case UINT8_ELEMENTS:
4099 case UINT8_CLAMPED_ELEMENTS:
4100 case INT8_ELEMENTS:
4101 if (key_is_constant) {
4102 __ StoreByte(value, mem_operand, r0);
4103 } else {
4104 __ stbx(value, mem_operand);
4105 }
4106 break;
4107 case INT16_ELEMENTS:
4108 case UINT16_ELEMENTS:
4109 if (key_is_constant) {
4110 __ StoreHalfWord(value, mem_operand, r0);
4111 } else {
4112 __ sthx(value, mem_operand);
4113 }
4114 break;
4115 case INT32_ELEMENTS:
4116 case UINT32_ELEMENTS:
4117 if (key_is_constant) {
4118 __ StoreWord(value, mem_operand, r0);
4119 } else {
4120 __ stwx(value, mem_operand);
4121 }
4122 break;
4123 case FLOAT32_ELEMENTS:
4124 case FLOAT64_ELEMENTS:
4125 case FAST_DOUBLE_ELEMENTS:
4126 case FAST_ELEMENTS:
4127 case FAST_SMI_ELEMENTS:
4128 case FAST_HOLEY_DOUBLE_ELEMENTS:
4129 case FAST_HOLEY_ELEMENTS:
4130 case FAST_HOLEY_SMI_ELEMENTS:
4131 case DICTIONARY_ELEMENTS:
4132 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4133 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4134 case FAST_STRING_WRAPPER_ELEMENTS:
4135 case SLOW_STRING_WRAPPER_ELEMENTS:
4136 case NO_ELEMENTS:
4137 UNREACHABLE();
4138 break;
4139 }
4140 }
4141 }
4142
4143
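// Keyed store into a FixedDoubleArray. Signalling NaNs are canonicalized
// to quiet NaNs when the instruction requires it.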
4144 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4145 DoubleRegister value = ToDoubleRegister(instr->value());
4146 Register elements = ToRegister(instr->elements());
4147 Register key = no_reg;
4148 Register scratch = scratch0();
4149 DoubleRegister double_scratch = double_scratch0();
4150 bool key_is_constant = instr->key()->IsConstantOperand();
4151 int constant_key = 0;
4152
4153 // Calculate the effective address of the slot in the array to store the
4154 // double value.
4155 if (key_is_constant) {
4156 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4157 if (constant_key & 0xF0000000) {
4158 Abort(kArrayIndexConstantValueTooBig);
4159 }
4160 } else {
4161 key = ToRegister(instr->key());
4162 }
4163 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4164 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4165 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
4166 if (!key_is_constant) {
4167 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
4168 __ add(scratch, elements, scratch);
4169 elements = scratch;
4170 }
4171 if (!is_int16(base_offset)) {
4172 __ Add(scratch, elements, base_offset, r0);
4173 base_offset = 0;
4174 elements = scratch;
4175 }
4176
4177 if (instr->NeedsCanonicalization()) {
4178 // Turn potential sNaN value into qNaN.
4179 __ CanonicalizeNaN(double_scratch, value);
4180 __ stfd(double_scratch, MemOperand(elements, base_offset));
4181 } else {
4182 __ stfd(value, MemOperand(elements, base_offset));
4183 }
4184 }
4185
4186
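// Keyed store into a FixedArray of tagged values (using the 64-bit smi
// optimization where applicable), followed by a write barrier when the
// stored value may need one.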
4187 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4188 HStoreKeyed* hinstr = instr->hydrogen();
4189 Register value = ToRegister(instr->value());
4190 Register elements = ToRegister(instr->elements());
4191 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4192 Register scratch = scratch0();
4193 Register store_base = scratch;
4194 int offset = instr->base_offset();
4195
4196 // Do the store.
4197 if (instr->key()->IsConstantOperand()) {
4198 DCHECK(!hinstr->NeedsWriteBarrier());
4199 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4200 offset += ToInteger32(const_operand) * kPointerSize;
4201 store_base = elements;
4202 } else {
4203 // Even though the HStoreKeyed instruction forces the input
4204 // representation for the key to be an integer, the input gets replaced
4205 // during bounds check elimination with the index argument to the bounds
4206 // check, which can be tagged, so that case must be handled here, too.
4207 if (hinstr->key()->representation().IsSmi()) {
4208 __ SmiToPtrArrayOffset(scratch, key);
4209 } else {
4210 __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
4211 }
4212 __ add(scratch, elements, scratch);
4213 }
4214
4215 Representation representation = hinstr->value()->representation();
4216
4217 #if V8_TARGET_ARCH_PPC64
4218 // 64-bit Smi optimization
4219 if (representation.IsInteger32()) {
4220 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4221 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4222 // Store int value directly to upper half of the smi.
4223 offset = SmiWordOffset(offset);
4224 }
4225 #endif
4226
4227 __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
4228 r0);
4229
4230 if (hinstr->NeedsWriteBarrier()) {
4231 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4232 ? OMIT_SMI_CHECK
4233 : INLINE_SMI_CHECK;
4234 // Compute address of modified element and store it into key register.
4235 __ Add(key, store_base, offset, r0);
4236 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
4237 EMIT_REMEMBERED_SET, check_needed,
4238 hinstr->PointersToHereCheckForValue());
4239 }
4240 }
4241
4242
4243 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4244 // By cases: external, fast double
4245 if (instr->is_fixed_typed_array()) {
4246 DoStoreKeyedExternalArray(instr);
4247 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4248 DoStoreKeyedFixedDoubleArray(instr);
4249 } else {
4250 DoStoreKeyedFixedArray(instr);
4251 }
4252 }
4253
4254
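// Grows the elements backing store through a deferred stub call when the
// key is at or beyond the current capacity; otherwise just forwards the
// existing elements pointer in r3.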
4255 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4256 class DeferredMaybeGrowElements final : public LDeferredCode {
4257 public:
4258 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4259 : LDeferredCode(codegen), instr_(instr) {}
4260 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4261 LInstruction* instr() override { return instr_; }
4262
4263 private:
4264 LMaybeGrowElements* instr_;
4265 };
4266
4267 Register result = r3;
4268 DeferredMaybeGrowElements* deferred =
4269 new (zone()) DeferredMaybeGrowElements(this, instr);
4270 LOperand* key = instr->key();
4271 LOperand* current_capacity = instr->current_capacity();
4272
4273 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4274 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4275 DCHECK(key->IsConstantOperand() || key->IsRegister());
4276 DCHECK(current_capacity->IsConstantOperand() ||
4277 current_capacity->IsRegister());
4278
4279 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4280 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4281 int32_t constant_capacity =
4282 ToInteger32(LConstantOperand::cast(current_capacity));
4283 if (constant_key >= constant_capacity) {
4284 // Deferred case.
4285 __ b(deferred->entry());
4286 }
4287 } else if (key->IsConstantOperand()) {
4288 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4289 __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
4290 __ ble(deferred->entry());
4291 } else if (current_capacity->IsConstantOperand()) {
4292 int32_t constant_capacity =
4293 ToInteger32(LConstantOperand::cast(current_capacity));
4294 __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
4295 __ bge(deferred->entry());
4296 } else {
4297 __ cmpw(ToRegister(key), ToRegister(current_capacity));
4298 __ bge(deferred->entry());
4299 }
4300
4301 if (instr->elements()->IsRegister()) {
4302 __ Move(result, ToRegister(instr->elements()));
4303 } else {
4304 __ LoadP(result, ToMemOperand(instr->elements()));
4305 }
4306
4307 __ bind(deferred->exit());
4308 }
4309
4310
4311 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4312 // TODO(3095996): Get rid of this. For now, we need to make the
4313 // result register contain a valid pointer because it is already
4314 // contained in the register pointer map.
4315 Register result = r3;
4316 __ li(result, Operand::Zero());
4317
4318 // We have to call a stub.
4319 {
4320 PushSafepointRegistersScope scope(this);
4321 if (instr->object()->IsRegister()) {
4322 __ Move(result, ToRegister(instr->object()));
4323 } else {
4324 __ LoadP(result, ToMemOperand(instr->object()));
4325 }
4326
4327 LOperand* key = instr->key();
4328 if (key->IsConstantOperand()) {
4329 LConstantOperand* constant_key = LConstantOperand::cast(key);
4330 int32_t int_key = ToInteger32(constant_key);
4331 if (Smi::IsValid(int_key)) {
4332 __ LoadSmiLiteral(r6, Smi::FromInt(int_key));
4333 } else {
4334 // We should never get here at runtime because there is a smi check on
4335 // the key before this point.
4336 __ stop("expected smi");
4337 }
4338 } else {
4339 __ SmiTag(r6, ToRegister(key));
4340 }
4341
4342 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
4343 __ CallStub(&stub);
4344 RecordSafepointWithLazyDeopt(
4345 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4346 __ StoreToSafepointRegisterSlot(result, result);
4347 }
4348
4349 // Deopt on smi, which means the elements array changed to dictionary mode.
4350 __ TestIfSmi(result, r0);
4351 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
4352 }
4353
4354
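// Transitions the object's elements kind if its map matches the expected
// source map: simple transitions just rewrite the map word (with a map
// write barrier); everything else calls TransitionElementsKindStub.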
4355 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4356 Register object_reg = ToRegister(instr->object());
4357 Register scratch = scratch0();
4358
4359 Handle<Map> from_map = instr->original_map();
4360 Handle<Map> to_map = instr->transitioned_map();
4361 ElementsKind from_kind = instr->from_kind();
4362 ElementsKind to_kind = instr->to_kind();
4363
4364 Label not_applicable;
4365 __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4366 __ Cmpi(scratch, Operand(from_map), r0);
4367 __ bne(¬_applicable);
4368
4369 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4370 Register new_map_reg = ToRegister(instr->new_map_temp());
4371 __ mov(new_map_reg, Operand(to_map));
4372 __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
4373 r0);
4374 // Write barrier.
4375 __ RecordWriteForMap(object_reg, new_map_reg, scratch,
4376 GetLinkRegisterState(), kDontSaveFPRegs);
4377 } else {
4378 DCHECK(ToRegister(instr->context()).is(cp));
4379 DCHECK(object_reg.is(r3));
4380 PushSafepointRegistersScope scope(this);
4381 __ Move(r4, to_map);
4382 TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
4383 __ CallStub(&stub);
4384 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4385 Safepoint::kLazyDeopt);
4386 }
4387 __ bind(¬_applicable);
4388 }
4389
4390
4391 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4392 Register object = ToRegister(instr->object());
4393 Register temp1 = ToRegister(instr->temp1());
4394 Register temp2 = ToRegister(instr->temp2());
4395 Label no_memento_found;
4396 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
4397 DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
4398 __ bind(&no_memento_found);
4399 }
4400
4401
4402 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4403 DCHECK(ToRegister(instr->context()).is(cp));
4404 DCHECK(ToRegister(instr->left()).is(r4));
4405 DCHECK(ToRegister(instr->right()).is(r3));
4406 StringAddStub stub(isolate(), instr->hydrogen()->flags(),
4407 instr->hydrogen()->pretenure_flag());
4408 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4409 }
4410
4411
4412 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4413 class DeferredStringCharCodeAt final : public LDeferredCode {
4414 public:
4415 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4416 : LDeferredCode(codegen), instr_(instr) {}
4417 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4418 LInstruction* instr() override { return instr_; }
4419
4420 private:
4421 LStringCharCodeAt* instr_;
4422 };
4423
4424 DeferredStringCharCodeAt* deferred =
4425 new (zone()) DeferredStringCharCodeAt(this, instr);
4426
4427 StringCharLoadGenerator::Generate(
4428 masm(), ToRegister(instr->string()), ToRegister(instr->index()),
4429 ToRegister(instr->result()), deferred->entry());
4430 __ bind(deferred->exit());
4431 }
4432
4433
4434 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4435 Register string = ToRegister(instr->string());
4436 Register result = ToRegister(instr->result());
4437 Register scratch = scratch0();
4438
4439 // TODO(3095996): Get rid of this. For now, we need to make the
4440 // result register contain a valid pointer because it is already
4441 // contained in the register pointer map.
4442 __ li(result, Operand::Zero());
4443
4444 PushSafepointRegistersScope scope(this);
4445 __ push(string);
4446 // Push the index as a smi. This is safe because of the checks in
4447 // DoStringCharCodeAt above.
4448 if (instr->index()->IsConstantOperand()) {
4449 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4450 __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
4451 __ push(scratch);
4452 } else {
4453 Register index = ToRegister(instr->index());
4454 __ SmiTag(index);
4455 __ push(index);
4456 }
4457 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4458 instr->context());
4459 __ AssertSmi(r3);
4460 __ SmiUntag(r3);
4461 __ StoreToSafepointRegisterSlot(r3, result);
4462 }
4463
4464
4465 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4466 class DeferredStringCharFromCode final : public LDeferredCode {
4467 public:
4468 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4469 : LDeferredCode(codegen), instr_(instr) {}
4470 void Generate() override {
4471 codegen()->DoDeferredStringCharFromCode(instr_);
4472 }
4473 LInstruction* instr() override { return instr_; }
4474
4475 private:
4476 LStringCharFromCode* instr_;
4477 };
4478
4479 DeferredStringCharFromCode* deferred =
4480 new (zone()) DeferredStringCharFromCode(this, instr);
4481
4482 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4483 Register char_code = ToRegister(instr->char_code());
4484 Register result = ToRegister(instr->result());
4485 DCHECK(!char_code.is(result));
4486
4487 __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
4488 __ bgt(deferred->entry());
4489 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4490 __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
4491 __ add(result, result, r0);
4492 __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4493 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4494 __ cmp(result, ip);
4495 __ beq(deferred->entry());
4496 __ bind(deferred->exit());
4497 }
4498
4499
4500 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4501 Register char_code = ToRegister(instr->char_code());
4502 Register result = ToRegister(instr->result());
4503
4504 // TODO(3095996): Get rid of this. For now, we need to make the
4505 // result register contain a valid pointer because it is already
4506 // contained in the register pointer map.
4507 __ li(result, Operand::Zero());
4508
4509 PushSafepointRegistersScope scope(this);
4510 __ SmiTag(char_code);
4511 __ push(char_code);
4512 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4513 instr->context());
4514 __ StoreToSafepointRegisterSlot(r3, result);
4515 }
4516
4517
4518 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4519 LOperand* input = instr->value();
4520 DCHECK(input->IsRegister() || input->IsStackSlot());
4521 LOperand* output = instr->result();
4522 DCHECK(output->IsDoubleRegister());
4523 if (input->IsStackSlot()) {
4524 Register scratch = scratch0();
4525 __ LoadP(scratch, ToMemOperand(input));
4526 __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
4527 } else {
4528 __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
4529 }
4530 }
4531
4532
4533 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4534 LOperand* input = instr->value();
4535 LOperand* output = instr->result();
4536 __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
4537 }
4538
4539
4540 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4541 class DeferredNumberTagI final : public LDeferredCode {
4542 public:
4543 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4544 : LDeferredCode(codegen), instr_(instr) {}
4545 void Generate() override {
4546 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4547 instr_->temp2(), SIGNED_INT32);
4548 }
4549 LInstruction* instr() override { return instr_; }
4550
4551 private:
4552 LNumberTagI* instr_;
4553 };
4554
4555 Register src = ToRegister(instr->value());
4556 Register dst = ToRegister(instr->result());
4557
4558 DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
4559 #if V8_TARGET_ARCH_PPC64
4560 __ SmiTag(dst, src);
4561 #else
4562 __ SmiTagCheckOverflow(dst, src, r0);
4563 __ BranchOnOverflow(deferred->entry());
4564 #endif
4565 __ bind(deferred->exit());
4566 }
4567
4568
4569 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4570 class DeferredNumberTagU final : public LDeferredCode {
4571 public:
4572 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4573 : LDeferredCode(codegen), instr_(instr) {}
4574 void Generate() override {
4575 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4576 instr_->temp2(), UNSIGNED_INT32);
4577 }
4578 LInstruction* instr() override { return instr_; }
4579
4580 private:
4581 LNumberTagU* instr_;
4582 };
4583
4584 Register input = ToRegister(instr->value());
4585 Register result = ToRegister(instr->result());
4586
4587 DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
4588 __ Cmpli(input, Operand(Smi::kMaxValue), r0);
4589 __ bgt(deferred->entry());
4590 __ SmiTag(result, input);
4591 __ bind(deferred->exit());
4592 }
4593
4594
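// Deferred path for tagging an int32/uint32 value that does not fit in a
// smi: allocates a heap number (inline when inline allocation is enabled,
// otherwise via the runtime) and stores the converted double into it.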
4595 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
4596 LOperand* temp1, LOperand* temp2,
4597 IntegerSignedness signedness) {
4598 Label done, slow;
4599 Register src = ToRegister(value);
4600 Register dst = ToRegister(instr->result());
4601 Register tmp1 = scratch0();
4602 Register tmp2 = ToRegister(temp1);
4603 Register tmp3 = ToRegister(temp2);
4604 DoubleRegister dbl_scratch = double_scratch0();
4605
4606 if (signedness == SIGNED_INT32) {
4607 // There was overflow, so bits 30 and 31 of the original integer
4608 // disagree. Try to allocate a heap number in new space and store
4609 // the value in there. If that fails, call the runtime system.
4610 if (dst.is(src)) {
4611 __ SmiUntag(src, dst);
4612 __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
4613 }
4614 __ ConvertIntToDouble(src, dbl_scratch);
4615 } else {
4616 __ ConvertUnsignedIntToDouble(src, dbl_scratch);
4617 }
4618
4619 if (FLAG_inline_new) {
4620 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4621 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4622 __ b(&done);
4623 }
4624
4625 // Slow case: Call the runtime system to do the number allocation.
4626 __ bind(&slow);
4627 {
4628 // TODO(3095996): Put a valid pointer value in the stack slot where the
4629 // result register is stored, as this register is in the pointer map, but
4630 // contains an integer value.
4631 __ li(dst, Operand::Zero());
4632
4633 // Preserve the value of all registers.
4634 PushSafepointRegistersScope scope(this);
4635 // Reset the context register.
4636 if (!dst.is(cp)) {
4637 __ li(cp, Operand::Zero());
4638 }
4639 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4640 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4641 Safepoint::kNoLazyDeopt);
4642 __ StoreToSafepointRegisterSlot(r3, dst);
4643 }
4644
4645 // Done. Put the value in dbl_scratch into the value of the allocated heap
4646 // number.
4647 __ bind(&done);
4648 __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4649 }
4650
4651
4652 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4653 class DeferredNumberTagD final : public LDeferredCode {
4654 public:
4655 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4656 : LDeferredCode(codegen), instr_(instr) {}
4657 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4658 LInstruction* instr() override { return instr_; }
4659
4660 private:
4661 LNumberTagD* instr_;
4662 };
4663
4664 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4665 Register scratch = scratch0();
4666 Register reg = ToRegister(instr->result());
4667 Register temp1 = ToRegister(instr->temp());
4668 Register temp2 = ToRegister(instr->temp2());
4669
4670 DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
4671 if (FLAG_inline_new) {
4672 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4673 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4674 } else {
4675 __ b(deferred->entry());
4676 }
4677 __ bind(deferred->exit());
4678 __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4679 }
4680
4681
4682 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4683 // TODO(3095996): Get rid of this. For now, we need to make the
4684 // result register contain a valid pointer because it is already
4685 // contained in the register pointer map.
4686 Register reg = ToRegister(instr->result());
4687 __ li(reg, Operand::Zero());
4688
4689 PushSafepointRegistersScope scope(this);
4690 // Reset the context register.
4691 if (!reg.is(cp)) {
4692 __ li(cp, Operand::Zero());
4693 }
4694 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4695 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4696 Safepoint::kNoLazyDeopt);
4697 __ StoreToSafepointRegisterSlot(r3, reg);
4698 }
4699
4700
4701 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4702 HChange* hchange = instr->hydrogen();
4703 Register input = ToRegister(instr->value());
4704 Register output = ToRegister(instr->result());
4705 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4706 hchange->value()->CheckFlag(HValue::kUint32)) {
4707 __ TestUnsignedSmiCandidate(input, r0);
4708 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
4709 }
4710 #if !V8_TARGET_ARCH_PPC64
4711 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4712 !hchange->value()->CheckFlag(HValue::kUint32)) {
4713 __ SmiTagCheckOverflow(output, input, r0);
4714 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
4715 } else {
4716 #endif
4717 __ SmiTag(output, input);
4718 #if !V8_TARGET_ARCH_PPC64
4719 }
4720 #endif
4721 }
4722
4723
4724 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4725 Register scratch = scratch0();
4726 Register input = ToRegister(instr->value());
4727 Register result = ToRegister(instr->result());
4728 if (instr->needs_check()) {
4729 // If the input is a HeapObject, the value of scratch won't be zero.
4730 __ andi(scratch, input, Operand(kHeapObjectTag));
4731 __ SmiUntag(result, input);
4732 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
4733 } else {
4734 __ SmiUntag(result, input);
4735 }
4736 }
4737
4738
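// Converts a tagged value to a double. Smis are converted directly; heap
// numbers are loaded; undefined can optionally become NaN. Anything else
// deoptimizes, as does -0 when deoptimize_on_minus_zero is set.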
4739 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4740 DoubleRegister result_reg,
4741 NumberUntagDMode mode) {
4742 bool can_convert_undefined_to_nan = instr->truncating();
4743 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4744
4745 Register scratch = scratch0();
4746 DCHECK(!result_reg.is(double_scratch0()));
4747
4748 Label convert, load_smi, done;
4749
4750 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4751 // Smi check.
4752 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4753
4754 // Heap number map check.
4755 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4756 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4757 __ cmp(scratch, ip);
4758 if (can_convert_undefined_to_nan) {
4759 __ bne(&convert);
4760 } else {
4761 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
4762 }
4763 // load heap number
4764 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4765 if (deoptimize_on_minus_zero) {
4766 __ TestDoubleIsMinusZero(result_reg, scratch, ip);
4767 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
4768 }
4769 __ b(&done);
4770 if (can_convert_undefined_to_nan) {
4771 __ bind(&convert);
4772 // Convert undefined (and hole) to NaN.
4773 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4774 __ cmp(input_reg, ip);
4775 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
4776 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4777 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4778 __ b(&done);
4779 }
4780 } else {
4781 __ SmiUntag(scratch, input_reg);
4782 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4783 }
4784 // Smi to double register conversion
4785 __ bind(&load_smi);
4786 // scratch: untagged value of input_reg
4787 __ ConvertIntToDouble(scratch, result_reg);
4788 __ bind(&done);
4789 }
4790
4791
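// Deferred tagged-to-int32 conversion for non-smi inputs. Truncating
// conversions accept heap numbers and oddballs; non-truncating ones
// deoptimize unless the heap number's value is exactly representable as
// an int32 (and not -0 when that bailout is requested).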
4792 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4793 Register input_reg = ToRegister(instr->value());
4794 Register scratch1 = scratch0();
4795 Register scratch2 = ToRegister(instr->temp());
4796 DoubleRegister double_scratch = double_scratch0();
4797 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4798
4799 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4800 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4801
4802 Label done;
4803
4804 // Heap number map check.
4805 __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4806 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4807 __ cmp(scratch1, ip);
4808
4809 if (instr->truncating()) {
4810 Label truncate;
4811 __ beq(&truncate);
4812 __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
4813 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
4814 __ bind(&truncate);
4815 __ mr(scratch2, input_reg);
4816 __ TruncateHeapNumberToI(input_reg, scratch2);
4817 } else {
4818 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
4819
4820 __ lfd(double_scratch2,
4821 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4822 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4823 // preserve heap number pointer in scratch2 for minus zero check below
4824 __ mr(scratch2, input_reg);
4825 }
4826 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
4827 double_scratch);
4828 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
4829
4830 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4831 __ cmpi(input_reg, Operand::Zero());
4832 __ bne(&done);
4833 __ TestHeapNumberSign(scratch2, scratch1);
4834 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
4835 }
4836 }
4837 __ bind(&done);
4838 }
4839
4840
4841 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4842 class DeferredTaggedToI final : public LDeferredCode {
4843 public:
4844 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4845 : LDeferredCode(codegen), instr_(instr) {}
4846 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4847 LInstruction* instr() override { return instr_; }
4848
4849 private:
4850 LTaggedToI* instr_;
4851 };
4852
4853 LOperand* input = instr->value();
4854 DCHECK(input->IsRegister());
4855 DCHECK(input->Equals(instr->result()));
4856
4857 Register input_reg = ToRegister(input);
4858
4859 if (instr->hydrogen()->value()->representation().IsSmi()) {
4860 __ SmiUntag(input_reg);
4861 } else {
4862 DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
4863
4864 // Branch to deferred code if the input is a HeapObject.
4865 __ JumpIfNotSmi(input_reg, deferred->entry());
4866
4867 __ SmiUntag(input_reg);
4868 __ bind(deferred->exit());
4869 }
4870 }
4871
4872
4873 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4874 LOperand* input = instr->value();
4875 DCHECK(input->IsRegister());
4876 LOperand* result = instr->result();
4877 DCHECK(result->IsDoubleRegister());
4878
4879 Register input_reg = ToRegister(input);
4880 DoubleRegister result_reg = ToDoubleRegister(result);
4881
4882 HValue* value = instr->hydrogen()->value();
4883 NumberUntagDMode mode = value->representation().IsSmi()
4884 ? NUMBER_CANDIDATE_IS_SMI
4885 : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4886
4887 EmitNumberUntagD(instr, input_reg, result_reg, mode);
4888 }
4889
4890
4891 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4892 Register result_reg = ToRegister(instr->result());
4893 Register scratch1 = scratch0();
4894 DoubleRegister double_input = ToDoubleRegister(instr->value());
4895 DoubleRegister double_scratch = double_scratch0();
4896
4897 if (instr->truncating()) {
4898 __ TruncateDoubleToI(result_reg, double_input);
4899 } else {
4900 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
4901 double_scratch);
4902 // Deoptimize if the input wasn't an int32 (inside a double).
4903 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
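// An integer result of 0 may have come from -0.0; when the instruction must
// not produce -0, check the sign of the input double and deoptimize if it is
// negative.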
4904 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4905 Label done;
4906 __ cmpi(result_reg, Operand::Zero());
4907 __ bne(&done);
4908 __ TestDoubleSign(double_input, scratch1);
4909 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
4910 __ bind(&done);
4911 }
4912 }
4913 }
4914
4915
4916 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4917 Register result_reg = ToRegister(instr->result());
4918 Register scratch1 = scratch0();
4919 DoubleRegister double_input = ToDoubleRegister(instr->value());
4920 DoubleRegister double_scratch = double_scratch0();
4921
4922 if (instr->truncating()) {
4923 __ TruncateDoubleToI(result_reg, double_input);
4924 } else {
4925 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
4926 double_scratch);
4927 // Deoptimize if the input wasn't an int32 (inside a double).
4928 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
4929 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4930 Label done;
4931 __ cmpi(result_reg, Operand::Zero());
4932 __ bne(&done);
4933 __ TestDoubleSign(double_input, scratch1);
4934 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
4935 __ bind(&done);
4936 }
4937 }
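// Smi-tag the int32 result. A rough sketch of the two encodings (the exact
// shifts live in the macro assembler):
//   64-bit: tagged = value << 32;  // payload in the upper word, never overflows
//   32-bit: tagged = value << 1;   // 31-bit payload, overflows for |value| >= 2^30
// so only the 32-bit build needs the overflow check below.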
4938 #if V8_TARGET_ARCH_PPC64
4939 __ SmiTag(result_reg);
4940 #else
4941 __ SmiTagCheckOverflow(result_reg, r0);
4942 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
4943 #endif
4944 }
4945
4946
4947 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4948 LOperand* input = instr->value();
4949 __ TestIfSmi(ToRegister(input), r0);
4950 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
4951 }
4952
4953
4954 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4955 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4956 LOperand* input = instr->value();
4957 __ TestIfSmi(ToRegister(input), r0);
4958 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
4959 }
4960 }
4961
4962
4963 void LCodeGen::DoCheckArrayBufferNotNeutered(
4964 LCheckArrayBufferNotNeutered* instr) {
4965 Register view = ToRegister(instr->view());
4966 Register scratch = scratch0();
4967
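// A detached (neutered) JSArrayBuffer has the WasNeutered bit set in its bit
// field; any access through a view over such a buffer deoptimizes as out of
// bounds.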
4968 __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
4969 __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
4970 __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
4971 DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
4972 }
4973
4974
4975 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4976 Register input = ToRegister(instr->value());
4977 Register scratch = scratch0();
4978
4979 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4980 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4981
4982 if (instr->hydrogen()->is_interval_check()) {
4983 InstanceType first;
4984 InstanceType last;
4985 instr->hydrogen()->GetCheckInterval(&first, &last);
4986
4987 __ cmpli(scratch, Operand(first));
4988
4989 // If there is only one type in the interval, check for equality.
4990 if (first == last) {
4991 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
4992 } else {
4993 DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
4994 // Omit check for the last type.
4995 if (last != LAST_TYPE) {
4996 __ cmpli(scratch, Operand(last));
4997 DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
4998 }
4999 }
5000 } else {
5001 uint8_t mask;
5002 uint8_t tag;
5003 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5004
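// When the mask is a single bit, the tag is either zero or that same bit, so
// one AND against the mask and a branch on the zero/non-zero result (via cr0)
// suffices; otherwise mask the instance type and compare against the tag.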
5005 if (base::bits::IsPowerOfTwo32(mask)) {
5006 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5007 __ andi(r0, scratch, Operand(mask));
5008 DeoptimizeIf(tag == 0 ? ne : eq, instr,
5009 DeoptimizeReason::kWrongInstanceType, cr0);
5010 } else {
5011 __ andi(scratch, scratch, Operand(mask));
5012 __ cmpi(scratch, Operand(tag));
5013 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
5014 }
5015 }
5016 }
5017
5018
5019 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5020 Register reg = ToRegister(instr->value());
5021 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5022 AllowDeferredHandleDereference smi_check;
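// The object may move on scavenge while it is in new space, so the comparison
// goes through a Cell (whose value the GC can update) rather than embedding
// the new-space pointer directly in the generated code.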
5023 if (isolate()->heap()->InNewSpace(*object)) {
5025 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5026 __ mov(ip, Operand(cell));
5027 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
5028 __ cmp(reg, ip);
5029 } else {
5030 __ Cmpi(reg, Operand(object), r0);
5031 }
5032 DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
5033 }
5034
5035
5036 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5037 Register temp = ToRegister(instr->temp());
5038 {
5039 PushSafepointRegistersScope scope(this);
5040 __ push(object);
5041 __ li(cp, Operand::Zero());
5042 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5043 RecordSafepointWithRegisters(instr->pointer_map(), 1,
5044 Safepoint::kNoLazyDeopt);
5045 __ StoreToSafepointRegisterSlot(r3, temp);
5046 }
5047 __ TestIfSmi(temp, r0);
5048 DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
5049 }
5050
5051
5052 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5053 class DeferredCheckMaps final : public LDeferredCode {
5054 public:
5055 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5056 : LDeferredCode(codegen), instr_(instr), object_(object) {
5057 SetExit(check_maps());
5058 }
5059 void Generate() override {
5060 codegen()->DoDeferredInstanceMigration(instr_, object_);
5061 }
5062 Label* check_maps() { return &check_maps_; }
5063 LInstruction* instr() override { return instr_; }
5064
5065 private:
5066 LCheckMaps* instr_;
5067 Label check_maps_;
5068 Register object_;
5069 };
5070
5071 if (instr->hydrogen()->IsStabilityCheck()) {
5072 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5073 for (int i = 0; i < maps->size(); ++i) {
5074 AddStabilityDependency(maps->at(i).handle());
5075 }
5076 return;
5077 }
5078
5079 Register object = ToRegister(instr->value());
5080 Register map_reg = ToRegister(instr->temp());
5081
5082 __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
5083
5084 DeferredCheckMaps* deferred = NULL;
5085 if (instr->hydrogen()->HasMigrationTarget()) {
5086 deferred = new (zone()) DeferredCheckMaps(this, instr, object);
5087 __ bind(deferred->check_maps());
5088 }
5089
5090 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5091 Label success;
5092 for (int i = 0; i < maps->size() - 1; i++) {
5093 Handle<Map> map = maps->at(i).handle();
5094 __ CompareMap(map_reg, map, &success);
5095 __ beq(&success);
5096 }
5097
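// The last map is handled separately: on mismatch, either branch to the
// deferred migration attempt (when a migration target exists) or deoptimize
// with kWrongMap.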
5098 Handle<Map> map = maps->at(maps->size() - 1).handle();
5099 __ CompareMap(map_reg, map, &success);
5100 if (instr->hydrogen()->HasMigrationTarget()) {
5101 __ bne(deferred->entry());
5102 } else {
5103 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
5104 }
5105
5106 __ bind(&success);
5107 }
5108
5109
5110 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5111 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5112 Register result_reg = ToRegister(instr->result());
5113 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5114 }
5115
5116
5117 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5118 Register unclamped_reg = ToRegister(instr->unclamped());
5119 Register result_reg = ToRegister(instr->result());
5120 __ ClampUint8(result_reg, unclamped_reg);
5121 }
5122
5123
5124 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5125 Register scratch = scratch0();
5126 Register input_reg = ToRegister(instr->unclamped());
5127 Register result_reg = ToRegister(instr->result());
5128 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5129 Label is_smi, done, heap_number;
5130
5131 // Both smi and heap number cases are handled.
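// Undefined additionally clamps to 0 (matching Uint8ClampedArray store
// semantics); any other non-number heap object deoptimizes.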
5132 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5133
5134 // Check for heap number
5135 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5136 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
5137 __ beq(&heap_number);
5138
5139 // Check for undefined. Undefined is converted to zero for clamping
5140 // conversions.
5141 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
5142 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
5143 __ li(result_reg, Operand::Zero());
5144 __ b(&done);
5145
5146 // Heap number
5147 __ bind(&heap_number);
5148 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5149 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5150 __ b(&done);
5151
5152 // smi
5153 __ bind(&is_smi);
5154 __ ClampUint8(result_reg, result_reg);
5155
5156 __ bind(&done);
5157 }
5158
5159
5160 void LCodeGen::DoAllocate(LAllocate* instr) {
5161 class DeferredAllocate final : public LDeferredCode {
5162 public:
5163 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5164 : LDeferredCode(codegen), instr_(instr) { }
5165 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5166 LInstruction* instr() override { return instr_; }
5167
5168 private:
5169 LAllocate* instr_;
5170 };
5171
5172 DeferredAllocate* deferred =
5173 new (zone()) DeferredAllocate(this, instr);
5174
5175 Register result = ToRegister(instr->result());
5176 Register scratch = ToRegister(instr->temp1());
5177 Register scratch2 = ToRegister(instr->temp2());
5178
5179 // Allocate memory for the object.
5180 AllocationFlags flags = NO_ALLOCATION_FLAGS;
5181 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5182 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5183 }
5184 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5185 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5186 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5187 }
5188
5189 if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5190 flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
5191 }
5192
5193 DCHECK(!instr->hydrogen()->IsAllocationFolded());
5194
5195 if (instr->size()->IsConstantOperand()) {
5196 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5197 CHECK(size <= kMaxRegularHeapObjectSize);
5198 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5199 } else {
5200 Register size = ToRegister(instr->size());
5201 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5202 }
5203
5204 __ bind(deferred->exit());
5205
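// Prefill the object with one-pointer filler maps, walking from the last
// pointer-sized slot down to offset 0, so the heap stays iterable until the
// real fields are initialized.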
5206 if (instr->hydrogen()->MustPrefillWithFiller()) {
5207 if (instr->size()->IsConstantOperand()) {
5208 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5209 __ LoadIntLiteral(scratch, size - kHeapObjectTag);
5210 } else {
5211 __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5212 }
5213 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5214 Label loop;
5215 __ bind(&loop);
5216 __ subi(scratch, scratch, Operand(kPointerSize));
5217 __ StorePX(scratch2, MemOperand(result, scratch));
5218 __ cmpi(scratch, Operand::Zero());
5219 __ bge(&loop);
5220 }
5221 }
5222
5223
5224 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5225 Register result = ToRegister(instr->result());
5226
5227 // TODO(3095996): Get rid of this. For now, we need to make the
5228 // result register contain a valid pointer because it is already
5229 // contained in the register pointer map.
5230 __ LoadSmiLiteral(result, Smi::kZero);
5231
5232 PushSafepointRegistersScope scope(this);
5233 if (instr->size()->IsRegister()) {
5234 Register size = ToRegister(instr->size());
5235 DCHECK(!size.is(result));
5236 __ SmiTag(size);
5237 __ push(size);
5238 } else {
5239 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5240 #if !V8_TARGET_ARCH_PPC64
5241 if (size >= 0 && size <= Smi::kMaxValue) {
5242 #endif
5243 __ Push(Smi::FromInt(size));
5244 #if !V8_TARGET_ARCH_PPC64
5245 } else {
5246 // We should never get here at runtime => abort
5247 __ stop("invalid allocation size");
5248 return;
5249 }
5250 #endif
5251 }
5252
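// The runtime call takes two Smi-tagged arguments: the requested size (pushed
// above; 32-bit builds abort on constant sizes outside the Smi range) and a
// flags word packing the double-alignment bit and the target space.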
5253 int flags = AllocateDoubleAlignFlag::encode(
5254 instr->hydrogen()->MustAllocateDoubleAligned());
5255 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5256 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5257 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5258 } else {
5259 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5260 }
5261 __ Push(Smi::FromInt(flags));
5262
5263 CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
5264 instr->context());
5265 __ StoreToSafepointRegisterSlot(r3, result);
5266
5267 if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5268 AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
5269 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5270 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5271 allocation_flags = static_cast<AllocationFlags>(allocation_flags | PRETENURE);
5272 }
5273 // If the allocation-folding dominator's allocation triggered a GC, the
5274 // allocation happened in the runtime. We have to reset the top pointer to virtually
5275 // undo the allocation.
5276 ExternalReference allocation_top =
5277 AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
5278 Register top_address = scratch0();
5279 __ subi(r3, r3, Operand(kHeapObjectTag));
5280 __ mov(top_address, Operand(allocation_top));
5281 __ StoreP(r3, MemOperand(top_address));
5282 __ addi(r3, r3, Operand(kHeapObjectTag));
5283 }
5284 }
5285
5286 void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
5287 DCHECK(instr->hydrogen()->IsAllocationFolded());
5288 DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
5289 Register result = ToRegister(instr->result());
5290 Register scratch1 = ToRegister(instr->temp1());
5291 Register scratch2 = ToRegister(instr->temp2());
5292
5293 AllocationFlags flags = ALLOCATION_FOLDED;
5294 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5295 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5296 }
5297 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5298 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5299 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5300 }
5301 if (instr->size()->IsConstantOperand()) {
5302 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5303 CHECK(size <= kMaxRegularHeapObjectSize);
5304 __ FastAllocate(size, result, scratch1, scratch2, flags);
5305 } else {
5306 Register size = ToRegister(instr->size());
5307 __ FastAllocate(size, result, scratch1, scratch2, flags);
5308 }
5309 }
5310
5311
5312 void LCodeGen::DoTypeof(LTypeof* instr) {
5313 DCHECK(ToRegister(instr->value()).is(r6));
5314 DCHECK(ToRegister(instr->result()).is(r3));
5315 Label end, do_call;
5316 Register value_register = ToRegister(instr->value());
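// Smis are always typeof 'number', so answer them inline and only call the
// Typeof stub for heap objects.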
5317 __ JumpIfNotSmi(value_register, &do_call);
5318 __ mov(r3, Operand(isolate()->factory()->number_string()));
5319 __ b(&end);
5320 __ bind(&do_call);
5321 Callable callable = CodeFactory::Typeof(isolate());
5322 CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
5323 __ bind(&end);
5324 }
5325
5326
5327 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5328 Register input = ToRegister(instr->value());
5329
5330 Condition final_branch_condition =
5331 EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
5332 instr->type_literal());
5333 if (final_branch_condition != kNoCondition) {
5334 EmitBranch(instr, final_branch_condition);
5335 }
5336 }
5337
5338
5339 Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
5340 Register input, Handle<String> type_name) {
5341 Condition final_branch_condition = kNoCondition;
5342 Register scratch = scratch0();
5343 Factory* factory = isolate()->factory();
5344 if (String::Equals(type_name, factory->number_string())) {
5345 __ JumpIfSmi(input, true_label);
5346 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5347 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5348 final_branch_condition = eq;
5349
5350 } else if (String::Equals(type_name, factory->string_string())) {
5351 __ JumpIfSmi(input, false_label);
5352 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5353 final_branch_condition = lt;
5354
5355 } else if (String::Equals(type_name, factory->symbol_string())) {
5356 __ JumpIfSmi(input, false_label);
5357 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5358 final_branch_condition = eq;
5359
5360 } else if (String::Equals(type_name, factory->boolean_string())) {
5361 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5362 __ beq(true_label);
5363 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5364 final_branch_condition = eq;
5365
5366 } else if (String::Equals(type_name, factory->undefined_string())) {
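// typeof null is 'object', so null is rejected before the undetectable-map
// check below; undetectable objects (e.g. document.all) do report 'undefined'.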
5367 __ CompareRoot(input, Heap::kNullValueRootIndex);
5368 __ beq(false_label);
5369 __ JumpIfSmi(input, false_label);
5370 // Check for undetectable objects => true.
5371 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5372 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5373 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5374 __ cmpi(r0, Operand::Zero());
5375 final_branch_condition = ne;
5376
5377 } else if (String::Equals(type_name, factory->function_string())) {
5378 __ JumpIfSmi(input, false_label);
5379 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5380 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
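// 'function' requires the callable bit to be set and the undetectable bit to
// be clear, hence the two-bit mask compared against the callable bit alone.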
5381 __ andi(scratch, scratch,
5382 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5383 __ cmpi(scratch, Operand(1 << Map::kIsCallable));
5384 final_branch_condition = eq;
5385
5386 } else if (String::Equals(type_name, factory->object_string())) {
5387 __ JumpIfSmi(input, false_label);
5388 __ CompareRoot(input, Heap::kNullValueRootIndex);
5389 __ beq(true_label);
5390 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5391 __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
5392 __ blt(false_label);
5393 // Check for callable or undetectable objects => false.
5394 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5395 __ andi(r0, scratch,
5396 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5397 __ cmpi(r0, Operand::Zero());
5398 final_branch_condition = eq;
5399
5400 // clang-format off
5401 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5402 } else if (String::Equals(type_name, factory->type##_string())) { \
5403 __ JumpIfSmi(input, false_label); \
5404 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
5405 __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
5406 final_branch_condition = eq;
5407 SIMD128_TYPES(SIMD128_TYPE)
5408 #undef SIMD128_TYPE
5409 // clang-format on
5410
5411 } else {
5412 __ b(false_label);
5413 }
5414
5415 return final_branch_condition;
5416 }
5417
5418
5419 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5420 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5421 // Ensure that we have enough space after the previous lazy-bailout
5422 // instruction for patching the code here.
5423 int current_pc = masm()->pc_offset();
5424 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5425 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5426 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5427 while (padding_size > 0) {
5428 __ nop();
5429 padding_size -= Assembler::kInstrSize;
5430 }
5431 }
5432 }
5433 last_lazy_deopt_pc_ = masm()->pc_offset();
5434 }
5435
5436
5437 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5438 last_lazy_deopt_pc_ = masm()->pc_offset();
5439 DCHECK(instr->HasEnvironment());
5440 LEnvironment* env = instr->environment();
5441 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5442 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5443 }
5444
5445
5446 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5447 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5448 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5449 // needed return address), even though the implementation of LAZY and EAGER is
5450 // now identical. When LAZY is eventually completely folded into EAGER, remove
5451 // the special case below.
5452 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5453 type = Deoptimizer::LAZY;
5454 }
5455
5456 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5457 }
5458
5459
5460 void LCodeGen::DoDummy(LDummy* instr) {
5461 // Nothing to see here, move on!
5462 }
5463
5464
5465 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5466 // Nothing to see here, move on!
5467 }
5468
5469
5470 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5471 PushSafepointRegistersScope scope(this);
5472 LoadContextFromDeferred(instr->context());
5473 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5474 RecordSafepointWithLazyDeopt(
5475 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5476 DCHECK(instr->HasEnvironment());
5477 LEnvironment* env = instr->environment();
5478 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5479 }
5480
5481
5482 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5483 class DeferredStackCheck final : public LDeferredCode {
5484 public:
5485 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5486 : LDeferredCode(codegen), instr_(instr) {}
5487 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5488 LInstruction* instr() override { return instr_; }
5489
5490 private:
5491 LStackCheck* instr_;
5492 };
5493
5494 DCHECK(instr->HasEnvironment());
5495 LEnvironment* env = instr->environment();
5496 // There is no LLazyBailout instruction for stack-checks. We have to
5497 // prepare for lazy deoptimization explicitly here.
5498 if (instr->hydrogen()->is_function_entry()) {
5499 // Perform stack overflow check.
5500 Label done;
5501 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5502 __ cmpl(sp, ip);
5503 __ bge(&done);
5504 DCHECK(instr->context()->IsRegister());
5505 DCHECK(ToRegister(instr->context()).is(cp));
5506 CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
5507 instr);
5508 __ bind(&done);
5509 } else {
5510 DCHECK(instr->hydrogen()->is_backwards_branch());
5511 // Perform stack overflow check if this goto needs it before jumping.
5512 DeferredStackCheck* deferred_stack_check =
5513 new (zone()) DeferredStackCheck(this, instr);
5514 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5515 __ cmpl(sp, ip);
5516 __ blt(deferred_stack_check->entry());
5517 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5518 __ bind(instr->done_label());
5519 deferred_stack_check->SetExit(instr->done_label());
5520 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5521 // Don't record a deoptimization index for the safepoint here.
5522 // This will be done explicitly when emitting call and the safepoint in
5523 // the deferred code.
5524 }
5525 }
5526
5527
5528 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5529 // This is a pseudo-instruction that ensures that the environment here is
5530 // properly registered for deoptimization and records the assembler's PC
5531 // offset.
5532 LEnvironment* environment = instr->environment();
5533
5534 // If the environment were already registered, we would have no way of
5535 // backpatching it with the spill slot operands.
5536 DCHECK(!environment->HasBeenRegistered());
5537 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5538
5539 GenerateOsrPrologue();
5540 }
5541
5542
5543 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5544 Label use_cache, call_runtime;
5545 __ CheckEnumCache(&call_runtime);
5546
5547 __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
5548 __ b(&use_cache);
5549
5550 // Get the set of properties to enumerate.
5551 __ bind(&call_runtime);
5552 __ push(r3);
5553 CallRuntime(Runtime::kForInEnumerate, instr);
5554 __ bind(&use_cache);
5555 }
5556
5557
5558 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5559 Register map = ToRegister(instr->map());
5560 Register result = ToRegister(instr->result());
5561 Label load_cache, done;
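// An enum length of zero means the map has nothing cached to enumerate, so the
// empty fixed array is returned; otherwise the enum cache is loaded from the
// descriptor array and a missing cache entry deoptimizes with kNoCache.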
5562 __ EnumLength(result, map);
5563 __ CmpSmiLiteral(result, Smi::kZero, r0);
5564 __ bne(&load_cache);
5565 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5566 __ b(&done);
5567
5568 __ bind(&load_cache);
5569 __ LoadInstanceDescriptors(map, result);
5570 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5571 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5572 __ cmpi(result, Operand::Zero());
5573 DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
5574
5575 __ bind(&done);
5576 }
5577
5578
5579 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5580 Register object = ToRegister(instr->value());
5581 Register map = ToRegister(instr->map());
5582 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5583 __ cmp(map, scratch0());
5584 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
5585 }
5586
5587
5588 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5589 Register result, Register object,
5590 Register index) {
5591 PushSafepointRegistersScope scope(this);
5592 __ Push(object, index);
5593 __ li(cp, Operand::Zero());
5594 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5595 RecordSafepointWithRegisters(instr->pointer_map(), 2,
5596 Safepoint::kNoLazyDeopt);
5597 __ StoreToSafepointRegisterSlot(r3, result);
5598 }
5599
5600
5601 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5602 class DeferredLoadMutableDouble final : public LDeferredCode {
5603 public:
5604 DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
5605 Register result, Register object, Register index)
5606 : LDeferredCode(codegen),
5607 instr_(instr),
5608 result_(result),
5609 object_(object),
5610 index_(index) {}
5611 void Generate() override {
5612 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5613 }
5614 LInstruction* instr() override { return instr_; }
5615
5616 private:
5617 LLoadFieldByIndex* instr_;
5618 Register result_;
5619 Register object_;
5620 Register index_;
5621 };
5622
5623 Register object = ToRegister(instr->object());
5624 Register index = ToRegister(instr->index());
5625 Register result = ToRegister(instr->result());
5626 Register scratch = scratch0();
5627
5628 DeferredLoadMutableDouble* deferred;
5629 deferred = new (zone())
5630 DeferredLoadMutableDouble(this, instr, result, object, index);
5631
5632 Label out_of_object, done;
5633
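// The lowest payload bit of the Smi-tagged index flags a mutable double field,
// which must be loaded through the deferred runtime call. After shifting the
// flag out, non-negative indices address in-object fields and negative indices
// address the out-of-object properties array.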
5634 __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
5635 __ bne(deferred->entry(), cr0);
5636 __ ShiftRightArithImm(index, index, 1);
5637
5638 __ cmpi(index, Operand::Zero());
5639 __ blt(&out_of_object);
5640
5641 __ SmiToPtrArrayOffset(r0, index);
5642 __ add(scratch, object, r0);
5643 __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5644
5645 __ b(&done);
5646
5647 __ bind(&out_of_object);
5648 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5649 // The index equals the negated out-of-object property index plus 1.
5650 __ SmiToPtrArrayOffset(r0, index);
5651 __ sub(scratch, result, r0);
5652 __ LoadP(result,
5653 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
5654 __ bind(deferred->exit());
5655 __ bind(&done);
5656 }
5657
5658 #undef __
5659
5660 } // namespace internal
5661 } // namespace v8
5662