1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/instruction-selector.h"
6
7 #include <limits>
8
9 #include "src/base/adapters.h"
10 #include "src/compiler/compiler-source-position-table.h"
11 #include "src/compiler/instruction-selector-impl.h"
12 #include "src/compiler/node-matchers.h"
13 #include "src/compiler/pipeline.h"
14 #include "src/compiler/schedule.h"
15 #include "src/compiler/state-values-utils.h"
16 #include "src/deoptimizer.h"
17
18 namespace v8 {
19 namespace internal {
20 namespace compiler {
21
// Constructs an InstructionSelector for a single compilation.  All per-node
// side tables (defined_, used_, effect_level_, virtual_registers_) are sized
// up front from {node_count} and zone-allocated in {zone}.
InstructionSelector::InstructionSelector(
    Zone* zone, size_t node_count, Linkage* linkage,
    InstructionSequence* sequence, Schedule* schedule,
    SourcePositionTable* source_positions, Frame* frame,
    SourcePositionMode source_position_mode, Features features,
    EnableScheduling enable_scheduling,
    EnableSerialization enable_serialization)
    : zone_(zone),
      linkage_(linkage),
      sequence_(sequence),
      source_positions_(source_positions),
      source_position_mode_(source_position_mode),
      features_(features),
      schedule_(schedule),
      current_block_(nullptr),
      instructions_(zone),
      defined_(node_count, false, zone),
      used_(node_count, false, zone),
      effect_level_(node_count, 0, zone),
      virtual_registers_(node_count,
                         InstructionOperand::kInvalidVirtualRegister, zone),
      virtual_register_rename_(zone),
      scheduler_(nullptr),
      enable_scheduling_(enable_scheduling),
      enable_serialization_(enable_serialization),
      frame_(frame),
      instruction_selection_failed_(false) {
  // Reserve up front to avoid repeated reallocation while emitting.
  instructions_.reserve(node_count);
}
51
// Drives instruction selection over the whole schedule.  Returns false when
// selection failed (e.g. an instruction exceeded operand-count limits).
bool InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used.
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (auto const block : *blocks) {
    if (!block->IsLoopHeader()) continue;
    DCHECK_LE(2u, block->PredecessorCount());
    for (Node* const phi : *block) {
      if (phi->opcode() != IrOpcode::kPhi) continue;

      // Mark all inputs as used.
      for (Node* const input : phi->inputs()) {
        MarkAsUsed(input);
      }
    }
  }

  // Visit each basic block in post order.
  for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
    if (instruction_selection_failed()) return false;
  }

  // Schedule the selected instructions.
  if (UseInstructionScheduling()) {
    scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
  }

  for (auto const block : *blocks) {
    InstructionBlock* instruction_block =
        sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
    for (size_t i = 0; i < instruction_block->phis().size(); i++) {
      UpdateRenamesInPhi(instruction_block->PhiAt(i));
    }
    // VisitBlock emits instructions in reverse, so code_start() is the
    // *larger* index and code_end() the smaller one.
    size_t end = instruction_block->code_end();
    size_t start = instruction_block->code_start();
    DCHECK_LE(end, start);
    StartBlock(RpoNumber::FromInt(block->rpo_number()));
    // Walk backwards from start down to end to restore forward order.
    while (start-- > end) {
      UpdateRenames(instructions_[start]);
      AddInstruction(instructions_[start]);
    }
    EndBlock(RpoNumber::FromInt(block->rpo_number()));
  }
#if DEBUG
  sequence()->ValidateSSA();
#endif
  return true;
}
100
StartBlock(RpoNumber rpo)101 void InstructionSelector::StartBlock(RpoNumber rpo) {
102 if (UseInstructionScheduling()) {
103 DCHECK_NOT_NULL(scheduler_);
104 scheduler_->StartBlock(rpo);
105 } else {
106 sequence()->StartBlock(rpo);
107 }
108 }
109
110
EndBlock(RpoNumber rpo)111 void InstructionSelector::EndBlock(RpoNumber rpo) {
112 if (UseInstructionScheduling()) {
113 DCHECK_NOT_NULL(scheduler_);
114 scheduler_->EndBlock(rpo);
115 } else {
116 sequence()->EndBlock(rpo);
117 }
118 }
119
120
AddInstruction(Instruction * instr)121 void InstructionSelector::AddInstruction(Instruction* instr) {
122 if (UseInstructionScheduling()) {
123 DCHECK_NOT_NULL(scheduler_);
124 scheduler_->AddInstruction(instr);
125 } else {
126 sequence()->AddInstruction(instr);
127 }
128 }
129
130
Emit(InstructionCode opcode,InstructionOperand output,size_t temp_count,InstructionOperand * temps)131 Instruction* InstructionSelector::Emit(InstructionCode opcode,
132 InstructionOperand output,
133 size_t temp_count,
134 InstructionOperand* temps) {
135 size_t output_count = output.IsInvalid() ? 0 : 1;
136 return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
137 }
138
139
Emit(InstructionCode opcode,InstructionOperand output,InstructionOperand a,size_t temp_count,InstructionOperand * temps)140 Instruction* InstructionSelector::Emit(InstructionCode opcode,
141 InstructionOperand output,
142 InstructionOperand a, size_t temp_count,
143 InstructionOperand* temps) {
144 size_t output_count = output.IsInvalid() ? 0 : 1;
145 return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
146 }
147
148
Emit(InstructionCode opcode,InstructionOperand output,InstructionOperand a,InstructionOperand b,size_t temp_count,InstructionOperand * temps)149 Instruction* InstructionSelector::Emit(InstructionCode opcode,
150 InstructionOperand output,
151 InstructionOperand a,
152 InstructionOperand b, size_t temp_count,
153 InstructionOperand* temps) {
154 size_t output_count = output.IsInvalid() ? 0 : 1;
155 InstructionOperand inputs[] = {a, b};
156 size_t input_count = arraysize(inputs);
157 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
158 temps);
159 }
160
161
Emit(InstructionCode opcode,InstructionOperand output,InstructionOperand a,InstructionOperand b,InstructionOperand c,size_t temp_count,InstructionOperand * temps)162 Instruction* InstructionSelector::Emit(InstructionCode opcode,
163 InstructionOperand output,
164 InstructionOperand a,
165 InstructionOperand b,
166 InstructionOperand c, size_t temp_count,
167 InstructionOperand* temps) {
168 size_t output_count = output.IsInvalid() ? 0 : 1;
169 InstructionOperand inputs[] = {a, b, c};
170 size_t input_count = arraysize(inputs);
171 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
172 temps);
173 }
174
175
Emit(InstructionCode opcode,InstructionOperand output,InstructionOperand a,InstructionOperand b,InstructionOperand c,InstructionOperand d,size_t temp_count,InstructionOperand * temps)176 Instruction* InstructionSelector::Emit(
177 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
178 InstructionOperand b, InstructionOperand c, InstructionOperand d,
179 size_t temp_count, InstructionOperand* temps) {
180 size_t output_count = output.IsInvalid() ? 0 : 1;
181 InstructionOperand inputs[] = {a, b, c, d};
182 size_t input_count = arraysize(inputs);
183 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
184 temps);
185 }
186
187
Emit(InstructionCode opcode,InstructionOperand output,InstructionOperand a,InstructionOperand b,InstructionOperand c,InstructionOperand d,InstructionOperand e,size_t temp_count,InstructionOperand * temps)188 Instruction* InstructionSelector::Emit(
189 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
190 InstructionOperand b, InstructionOperand c, InstructionOperand d,
191 InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
192 size_t output_count = output.IsInvalid() ? 0 : 1;
193 InstructionOperand inputs[] = {a, b, c, d, e};
194 size_t input_count = arraysize(inputs);
195 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
196 temps);
197 }
198
199
Emit(InstructionCode opcode,InstructionOperand output,InstructionOperand a,InstructionOperand b,InstructionOperand c,InstructionOperand d,InstructionOperand e,InstructionOperand f,size_t temp_count,InstructionOperand * temps)200 Instruction* InstructionSelector::Emit(
201 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
202 InstructionOperand b, InstructionOperand c, InstructionOperand d,
203 InstructionOperand e, InstructionOperand f, size_t temp_count,
204 InstructionOperand* temps) {
205 size_t output_count = output.IsInvalid() ? 0 : 1;
206 InstructionOperand inputs[] = {a, b, c, d, e, f};
207 size_t input_count = arraysize(inputs);
208 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
209 temps);
210 }
211
212
Emit(InstructionCode opcode,size_t output_count,InstructionOperand * outputs,size_t input_count,InstructionOperand * inputs,size_t temp_count,InstructionOperand * temps)213 Instruction* InstructionSelector::Emit(
214 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
215 size_t input_count, InstructionOperand* inputs, size_t temp_count,
216 InstructionOperand* temps) {
217 if (output_count >= Instruction::kMaxOutputCount ||
218 input_count >= Instruction::kMaxInputCount ||
219 temp_count >= Instruction::kMaxTempCount) {
220 set_instruction_selection_failed();
221 return nullptr;
222 }
223
224 Instruction* instr =
225 Instruction::New(instruction_zone(), opcode, output_count, outputs,
226 input_count, inputs, temp_count, temps);
227 return Emit(instr);
228 }
229
230
// Appends {instr} to the buffered per-block instruction list; the buffer is
// flushed into the sequence later by SelectInstructions().
Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}
235
236
CanCover(Node * user,Node * node) const237 bool InstructionSelector::CanCover(Node* user, Node* node) const {
238 // 1. Both {user} and {node} must be in the same basic block.
239 if (schedule()->block(node) != schedule()->block(user)) {
240 return false;
241 }
242 // 2. Pure {node}s must be owned by the {user}.
243 if (node->op()->HasProperty(Operator::kPure)) {
244 return node->OwnedBy(user);
245 }
246 // 3. Impure {node}s must match the effect level of {user}.
247 if (GetEffectLevel(node) != GetEffectLevel(user)) {
248 return false;
249 }
250 // 4. Only {node} must have value edges pointing to {user}.
251 for (Edge const edge : node->use_edges()) {
252 if (edge.from() != user && NodeProperties::IsValueEdge(edge)) {
253 return false;
254 }
255 }
256 return true;
257 }
258
IsOnlyUserOfNodeInSameBlock(Node * user,Node * node) const259 bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
260 Node* node) const {
261 BasicBlock* bb_user = schedule()->block(user);
262 BasicBlock* bb_node = schedule()->block(node);
263 if (bb_user != bb_node) return false;
264 for (Edge const edge : node->use_edges()) {
265 Node* from = edge.from();
266 if ((from != user) && (schedule()->block(from) == bb_user)) {
267 return false;
268 }
269 }
270 return true;
271 }
272
UpdateRenames(Instruction * instruction)273 void InstructionSelector::UpdateRenames(Instruction* instruction) {
274 for (size_t i = 0; i < instruction->InputCount(); i++) {
275 TryRename(instruction->InputAt(i));
276 }
277 }
278
UpdateRenamesInPhi(PhiInstruction * phi)279 void InstructionSelector::UpdateRenamesInPhi(PhiInstruction* phi) {
280 for (size_t i = 0; i < phi->operands().size(); i++) {
281 int vreg = phi->operands()[i];
282 int renamed = GetRename(vreg);
283 if (vreg != renamed) {
284 phi->RenameInput(i, renamed);
285 }
286 }
287 }
288
GetRename(int virtual_register)289 int InstructionSelector::GetRename(int virtual_register) {
290 int rename = virtual_register;
291 while (true) {
292 if (static_cast<size_t>(rename) >= virtual_register_rename_.size()) break;
293 int next = virtual_register_rename_[rename];
294 if (next == InstructionOperand::kInvalidVirtualRegister) {
295 break;
296 }
297 rename = next;
298 }
299 return rename;
300 }
301
TryRename(InstructionOperand * op)302 void InstructionSelector::TryRename(InstructionOperand* op) {
303 if (!op->IsUnallocated()) return;
304 int vreg = UnallocatedOperand::cast(op)->virtual_register();
305 int rename = GetRename(vreg);
306 if (rename != vreg) {
307 UnallocatedOperand::cast(op)->set_virtual_register(rename);
308 }
309 }
310
SetRename(const Node * node,const Node * rename)311 void InstructionSelector::SetRename(const Node* node, const Node* rename) {
312 int vreg = GetVirtualRegister(node);
313 if (static_cast<size_t>(vreg) >= virtual_register_rename_.size()) {
314 int invalid = InstructionOperand::kInvalidVirtualRegister;
315 virtual_register_rename_.resize(vreg + 1, invalid);
316 }
317 virtual_register_rename_[vreg] = GetVirtualRegister(rename);
318 }
319
GetVirtualRegister(const Node * node)320 int InstructionSelector::GetVirtualRegister(const Node* node) {
321 DCHECK_NOT_NULL(node);
322 size_t const id = node->id();
323 DCHECK_LT(id, virtual_registers_.size());
324 int virtual_register = virtual_registers_[id];
325 if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
326 virtual_register = sequence()->NextVirtualRegister();
327 virtual_registers_[id] = virtual_register;
328 }
329 return virtual_register;
330 }
331
332
GetVirtualRegistersForTesting() const333 const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
334 const {
335 std::map<NodeId, int> virtual_registers;
336 for (size_t n = 0; n < virtual_registers_.size(); ++n) {
337 if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
338 NodeId const id = static_cast<NodeId>(n);
339 virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
340 }
341 }
342 return virtual_registers;
343 }
344
345
IsDefined(Node * node) const346 bool InstructionSelector::IsDefined(Node* node) const {
347 DCHECK_NOT_NULL(node);
348 size_t const id = node->id();
349 DCHECK_LT(id, defined_.size());
350 return defined_[id];
351 }
352
353
MarkAsDefined(Node * node)354 void InstructionSelector::MarkAsDefined(Node* node) {
355 DCHECK_NOT_NULL(node);
356 size_t const id = node->id();
357 DCHECK_LT(id, defined_.size());
358 defined_[id] = true;
359 }
360
361
IsUsed(Node * node) const362 bool InstructionSelector::IsUsed(Node* node) const {
363 DCHECK_NOT_NULL(node);
364 // TODO(bmeurer): This is a terrible monster hack, but we have to make sure
365 // that the Retain is actually emitted, otherwise the GC will mess up.
366 if (node->opcode() == IrOpcode::kRetain) return true;
367 if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
368 size_t const id = node->id();
369 DCHECK_LT(id, used_.size());
370 return used_[id];
371 }
372
373
MarkAsUsed(Node * node)374 void InstructionSelector::MarkAsUsed(Node* node) {
375 DCHECK_NOT_NULL(node);
376 size_t const id = node->id();
377 DCHECK_LT(id, used_.size());
378 used_[id] = true;
379 }
380
GetEffectLevel(Node * node) const381 int InstructionSelector::GetEffectLevel(Node* node) const {
382 DCHECK_NOT_NULL(node);
383 size_t const id = node->id();
384 DCHECK_LT(id, effect_level_.size());
385 return effect_level_[id];
386 }
387
SetEffectLevel(Node * node,int effect_level)388 void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
389 DCHECK_NOT_NULL(node);
390 size_t const id = node->id();
391 DCHECK_LT(id, effect_level_.size());
392 effect_level_[id] = effect_level;
393 }
394
CanAddressRelativeToRootsRegister() const395 bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
396 return enable_serialization_ == kDisableSerialization &&
397 CanUseRootsRegister();
398 }
399
CanUseRootsRegister() const400 bool InstructionSelector::CanUseRootsRegister() const {
401 return linkage()->GetIncomingDescriptor()->flags() &
402 CallDescriptor::kCanUseRoots;
403 }
404
MarkAsRepresentation(MachineRepresentation rep,const InstructionOperand & op)405 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
406 const InstructionOperand& op) {
407 UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
408 sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
409 }
410
411
MarkAsRepresentation(MachineRepresentation rep,Node * node)412 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
413 Node* node) {
414 sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
415 }
416
417
418 namespace {
419
// Controls how a frame-state input may be materialized for deoptimization:
// kAny accepts any operand kind, kStackSlot forces a (unique) stack slot.
enum class FrameStateInputKind { kAny, kStackSlot };
421
// Converts {input} into an instruction operand for a deoptimization frame
// state entry of representation {rep}.  Constants become immediates; other
// values are materialized according to {kind}.  Object states must be
// flattened by the caller and never reach this function.
InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
                                   FrameStateInputKind kind,
                                   MachineRepresentation rep) {
  if (rep == MachineRepresentation::kNone) {
    return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
  }

  switch (input->opcode()) {
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kNumberConstant:
    case IrOpcode::kFloat32Constant:
    case IrOpcode::kFloat64Constant:
    case IrOpcode::kHeapConstant:
      return g->UseImmediate(input);
    case IrOpcode::kObjectState:
    case IrOpcode::kTypedObjectState:
      // Handled by AddOperandToStateValueDescriptor before getting here.
      UNREACHABLE();
      break;
    default:
      switch (kind) {
        case FrameStateInputKind::kStackSlot:
          return g->UseUniqueSlot(input);
        case FrameStateInputKind::kAny:
          // Currently deopts "wrap" other operations, so the deopt's inputs
          // are potentially needed until the end of the deoptimising code.
          return g->UseAnyAtEnd(input);
      }
  }
  UNREACHABLE();
  return InstructionOperand();
}
454
455
456 class StateObjectDeduplicator {
457 public:
StateObjectDeduplicator(Zone * zone)458 explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
459 static const size_t kNotDuplicated = SIZE_MAX;
460
GetObjectId(Node * node)461 size_t GetObjectId(Node* node) {
462 for (size_t i = 0; i < objects_.size(); ++i) {
463 if (objects_[i] == node) {
464 return i;
465 }
466 }
467 return kNotDuplicated;
468 }
469
InsertObject(Node * node)470 size_t InsertObject(Node* node) {
471 size_t id = objects_.size();
472 objects_.push_back(node);
473 return id;
474 }
475
476 private:
477 ZoneVector<Node*> objects_;
478 };
479
480
// Appends {input} (recursively flattening typed object states) to the state
// value descriptor tree and to {inputs}.
// Returns the number of instruction operands added to inputs.
size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
                                        InstructionOperandVector* inputs,
                                        OperandGenerator* g,
                                        StateObjectDeduplicator* deduplicator,
                                        Node* input, MachineType type,
                                        FrameStateInputKind kind, Zone* zone) {
  switch (input->opcode()) {
    case IrOpcode::kObjectState: {
      // Untyped object states must have been typed before selection.
      UNREACHABLE();
      return 0;
    }
    case IrOpcode::kTypedObjectState: {
      size_t id = deduplicator->GetObjectId(input);
      if (id == StateObjectDeduplicator::kNotDuplicated) {
        // First occurrence: emit a recursive descriptor and flatten all of
        // the object's value inputs into {inputs}.
        size_t entries = 0;
        id = deduplicator->InsertObject(input);
        descriptor->fields().push_back(
            StateValueDescriptor::Recursive(zone, id));
        StateValueDescriptor* new_desc = &descriptor->fields().back();
        int const input_count = input->op()->ValueInputCount();
        ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
        for (int i = 0; i < input_count; ++i) {
          entries += AddOperandToStateValueDescriptor(
              new_desc, inputs, g, deduplicator, input->InputAt(i),
              types->at(i), kind, zone);
        }
        return entries;
      } else {
        // Crankshaft counts duplicate objects for the running id, so we have
        // to push the input again.
        deduplicator->InsertObject(input);
        descriptor->fields().push_back(
            StateValueDescriptor::Duplicate(zone, id));
        return 0;
      }
    }
    default: {
      // Plain value: one instruction operand, one descriptor entry.
      inputs->push_back(OperandForDeopt(g, input, kind, type.representation()));
      descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
      return 1;
    }
  }
}
525
526
// Flattens the FrameState node {state} (including any outer/inlined frame
// states, emitted first) into {inputs} and the descriptor's state-value tree.
// Emission order must match the FrameStateDescriptor layout: function,
// parameters, context (if present), locals, then stack values.
// Returns the number of instruction operands added to inputs.
size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
                                       Node* state, OperandGenerator* g,
                                       StateObjectDeduplicator* deduplicator,
                                       InstructionOperandVector* inputs,
                                       FrameStateInputKind kind, Zone* zone) {
  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());

  size_t entries = 0;
  size_t initial_size = inputs->size();
  USE(initial_size);  // initial_size is only used for debug.

  if (descriptor->outer_state()) {
    // Recurse so that outer (caller) frames precede the innermost frame.
    entries += AddInputsToFrameStateDescriptor(
        descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
        g, deduplicator, inputs, kind, zone);
  }

  Node* parameters = state->InputAt(kFrameStateParametersInput);
  Node* locals = state->InputAt(kFrameStateLocalsInput);
  Node* stack = state->InputAt(kFrameStateStackInput);
  Node* context = state->InputAt(kFrameStateContextInput);
  Node* function = state->InputAt(kFrameStateFunctionInput);

  DCHECK_EQ(descriptor->parameters_count(),
            StateValuesAccess(parameters).size());
  DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
  DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());

  StateValueDescriptor* values_descriptor =
      descriptor->GetStateValueDescriptor();
  // The closure and the context always go into (unique) stack slots.
  entries += AddOperandToStateValueDescriptor(
      values_descriptor, inputs, g, deduplicator, function,
      MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
  for (StateValuesAccess::TypedNode input_node :
       StateValuesAccess(parameters)) {
    entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
                                                deduplicator, input_node.node,
                                                input_node.type, kind, zone);
  }
  if (descriptor->HasContext()) {
    entries += AddOperandToStateValueDescriptor(
        values_descriptor, inputs, g, deduplicator, context,
        MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
  }
  for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
    entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
                                                deduplicator, input_node.node,
                                                input_node.type, kind, zone);
  }
  for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
    entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
                                                deduplicator, input_node.node,
                                                input_node.type, kind, zone);
  }
  DCHECK_EQ(initial_size + entries, inputs->size());
  return entries;
}
585
586 } // namespace
587
588
589 // An internal helper class for generating the operands to calls.
590 // TODO(bmeurer): Get rid of the CallBuffer business and make
591 // InstructionSelector::VisitCall platform independent instead.
struct CallBuffer {
  CallBuffer(Zone* zone, const CallDescriptor* descriptor,
             FrameStateDescriptor* frame_state)
      : descriptor(descriptor),
        frame_state_descriptor(frame_state),
        output_nodes(zone),
        outputs(zone),
        instruction_args(zone),
        pushed_nodes(zone) {
    output_nodes.reserve(descriptor->ReturnCount());
    outputs.reserve(descriptor->ReturnCount());
    pushed_nodes.reserve(input_count());
    instruction_args.reserve(input_count() + frame_state_value_count());
  }


  // Describes the callee's calling convention (ABI).
  const CallDescriptor* descriptor;
  // Lazy-deoptimization frame state for the call, if any.
  FrameStateDescriptor* frame_state_descriptor;
  // Per-return-value projection node (or the call itself for single return).
  NodeVector output_nodes;
  // Instruction operands for the live outputs.
  InstructionOperandVector outputs;
  // Register/immediate call arguments (callee first, then frame state).
  InstructionOperandVector instruction_args;
  // Arguments passed on the stack, indexed by stack slot.
  ZoneVector<PushParameter> pushed_nodes;

  size_t input_count() const { return descriptor->InputCount(); }

  size_t frame_state_count() const { return descriptor->FrameStateCount(); }

  size_t frame_state_value_count() const {
    return (frame_state_descriptor == nullptr)
               ? 0
               : (frame_state_descriptor->GetTotalSize() +
                  1);  // Include deopt id.
  }
};
626
627
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
// Fills {buffer} with the outputs, callee, frame-state, and argument
// operands for {call}.  Arguments with fixed-slot policies (except during
// tail calls) go to pushed_nodes; everything else to instruction_args.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                               CallBufferFlags flags,
                                               int stack_param_delta) {
  OperandGenerator g(this);
  DCHECK_LE(call->op()->ValueOutputCount(),
            static_cast<int>(buffer->descriptor->ReturnCount()));
  DCHECK_EQ(
      call->op()->ValueInputCount(),
      static_cast<int>(buffer->input_count() + buffer->frame_state_count()));

  if (buffer->descriptor->ReturnCount() > 0) {
    // Collect the projections that represent multiple outputs from this call.
    if (buffer->descriptor->ReturnCount() == 1) {
      buffer->output_nodes.push_back(call);
    } else {
      buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
      for (auto use : call->uses()) {
        if (use->opcode() != IrOpcode::kProjection) continue;
        size_t const index = ProjectionIndexOf(use->op());
        DCHECK_LT(index, buffer->output_nodes.size());
        DCHECK(!buffer->output_nodes[index]);
        buffer->output_nodes[index] = use;
      }
    }

    // Filter out the outputs that aren't live because no projection uses them.
    size_t outputs_needed_by_framestate =
        buffer->frame_state_descriptor == nullptr
            ? 0
            : buffer->frame_state_descriptor->state_combine()
                  .ConsumedOutputCount();
    for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
      bool output_is_live = buffer->output_nodes[i] != nullptr ||
                            i < outputs_needed_by_framestate;
      if (output_is_live) {
        MachineRepresentation rep =
            buffer->descriptor->GetReturnType(static_cast<int>(i))
                .representation();
        LinkageLocation location =
            buffer->descriptor->GetReturnLocation(static_cast<int>(i));

        // Outputs needed only by the frame state (no projection) still get a
        // temp at the right location so the value is preserved.
        Node* output = buffer->output_nodes[i];
        InstructionOperand op = output == nullptr
                                    ? g.TempLocation(location)
                                    : g.DefineAsLocation(output, location);
        MarkAsRepresentation(rep, op);

        buffer->outputs.push_back(op);
      }
    }
  }

  // The first argument is always the callee code.
  Node* callee = call->InputAt(0);
  bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
  bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
  switch (buffer->descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      buffer->instruction_args.push_back(
          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
              ? g.UseImmediate(callee)
              : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallAddress:
      buffer->instruction_args.push_back(
          (call_address_immediate &&
           callee->opcode() == IrOpcode::kExternalConstant)
              ? g.UseImmediate(callee)
              : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallJSFunction:
      buffer->instruction_args.push_back(
          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
      break;
  }
  DCHECK_EQ(1u, buffer->instruction_args.size());

  // If the call needs a frame state, we insert the state information as
  // follows (n is the number of value inputs to the frame state):
  // arg 1               : deoptimization id.
  // arg 2 - arg (n + 1) : value inputs to the frame state.
  size_t frame_state_entries = 0;
  USE(frame_state_entries);  // frame_state_entries is only used for debug.
  if (buffer->frame_state_descriptor != nullptr) {
    Node* frame_state =
        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));

    // If it was a syntactic tail call we need to drop the current frame and
    // all the frames on top of it that are either an arguments adaptor frame
    // or a tail caller frame.
    if (buffer->descriptor->SupportsTailCalls()) {
      frame_state = NodeProperties::GetFrameStateInput(frame_state);
      buffer->frame_state_descriptor =
          buffer->frame_state_descriptor->outer_state();
      while (buffer->frame_state_descriptor != nullptr &&
             (buffer->frame_state_descriptor->type() ==
                  FrameStateType::kArgumentsAdaptor ||
              buffer->frame_state_descriptor->type() ==
                  FrameStateType::kTailCallerFunction)) {
        frame_state = NodeProperties::GetFrameStateInput(frame_state);
        buffer->frame_state_descriptor =
            buffer->frame_state_descriptor->outer_state();
      }
    }

    int const state_id = sequence()->AddDeoptimizationEntry(
        buffer->frame_state_descriptor, DeoptimizeReason::kNoReason);
    buffer->instruction_args.push_back(g.TempImmediate(state_id));

    StateObjectDeduplicator deduplicator(instruction_zone());

    frame_state_entries =
        1 + AddInputsToFrameStateDescriptor(
                buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
                &buffer->instruction_args, FrameStateInputKind::kStackSlot,
                instruction_zone());

    DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
  }

  size_t input_count = static_cast<size_t>(buffer->input_count());

  // Split the arguments into pushed_nodes and instruction_args. Pushed
  // arguments require an explicit push instruction before the call and do
  // not appear as arguments to the call. Everything else ends up
  // as an InstructionOperand argument to the call.
  auto iter(call->inputs().begin());
  size_t pushed_count = 0;
  bool call_tail = (flags & kCallTail) != 0;
  for (size_t index = 0; index < input_count; ++iter, ++index) {
    DCHECK(iter != call->inputs().end());
    DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
    if (index == 0) continue;  // The first argument (callee) is already done.

    LinkageLocation location = buffer->descriptor->GetInputLocation(index);
    if (call_tail) {
      location = LinkageLocation::ConvertToTailCallerLocation(
          location, stack_param_delta);
    }
    InstructionOperand op = g.UseLocation(*iter, location);
    if (UnallocatedOperand::cast(op).HasFixedSlotPolicy() && !call_tail) {
      // Fixed-slot arguments become explicit pushes; slot indices are
      // negative and 1-based, hence the conversion below.
      int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
      if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
        buffer->pushed_nodes.resize(stack_index + 1);
      }
      PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
      buffer->pushed_nodes[stack_index] = parameter;
      pushed_count++;
    } else {
      buffer->instruction_args.push_back(op);
    }
  }
  DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
                             frame_state_entries);
  if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
      stack_param_delta != 0) {
    // For tail calls that change the size of their parameter list and keep
    // their return address on the stack, move the return address to just above
    // the parameters.
    LinkageLocation saved_return_location =
        LinkageLocation::ForSavedCallerReturnAddress();
    InstructionOperand return_address =
        g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
                                 saved_return_location, stack_param_delta),
                             saved_return_location);
    buffer->instruction_args.push_back(return_address);
  }
}
798
// Selects instructions for every used node of {block}.  Instructions are
// emitted in reverse (bottom-up) order into instructions_; each node's
// (and the control's) sub-range is flipped so that the whole block range
// ends up in reverse block order, to be un-reversed by SelectInstructions().
void InstructionSelector::VisitBlock(BasicBlock* block) {
  DCHECK(!current_block_);
  current_block_ = block;
  int current_block_end = static_cast<int>(instructions_.size());

  // Assign effect levels: every store/call starts a new level, so CanCover
  // will not fold loads across effectful operations.
  int effect_level = 0;
  for (Node* const node : *block) {
    if (node->opcode() == IrOpcode::kStore ||
        node->opcode() == IrOpcode::kUnalignedStore ||
        node->opcode() == IrOpcode::kCheckedStore ||
        node->opcode() == IrOpcode::kCall) {
      ++effect_level;
    }
    SetEffectLevel(node, effect_level);
  }

  // We visit the control first, then the nodes in the block, so the block's
  // control input should be on the same effect level as the last node.
  if (block->control_input() != nullptr) {
    SetEffectLevel(block->control_input(), effect_level);
  }

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  std::reverse(instructions_.begin() + current_block_end, instructions_.end());

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (auto node : base::Reversed(*block)) {
    // Skip nodes that are unused or already defined.
    if (!IsUsed(node) || IsDefined(node)) continue;
    // Generate code for this node "top down", but schedule the code "bottom
    // up".
    size_t current_node_end = instructions_.size();
    VisitNode(node);
    if (instruction_selection_failed()) return;
    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
    if (instructions_.size() == current_node_end) continue;
    // Mark source position on first instruction emitted.
    SourcePosition source_position = source_positions_->GetSourcePosition(node);
    if (source_position.IsKnown() &&
        (source_position_mode_ == kAllSourcePositions ||
         node->opcode() == IrOpcode::kCall)) {
      sequence()->SetSourcePosition(instructions_[current_node_end],
                                    source_position);
    }
  }

  // We're done with the block.  Note: code_start > code_end because the
  // range still holds reversed instructions at this point.
  InstructionBlock* instruction_block =
      sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
  instruction_block->set_code_start(static_cast<int>(instructions_.size()));
  instruction_block->set_code_end(current_block_end);

  current_block_ = nullptr;
}
856
857
// Emits the instructions that terminate {block}, dispatching on the block's
// control kind. The block's control-input node (if any) supplies operands.
void InstructionSelector::VisitControl(BasicBlock* block) {
#ifdef DEBUG
  // SSA deconstruction requires targets of branches not to have phis.
  // Edge split form guarantees this property, but is more strict.
  if (block->SuccessorCount() > 1) {
    for (BasicBlock* const successor : block->successors()) {
      for (Node* const node : *successor) {
        CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
      }
    }
  }
#endif

  Node* input = block->control_input();
  switch (block->control()) {
    case BasicBlock::kGoto:
      return VisitGoto(block->SuccessorAt(0));
    case BasicBlock::kCall: {
      DCHECK_EQ(IrOpcode::kCall, input->opcode());
      // A call with an exceptional edge: successor 0 is the normal
      // continuation, successor 1 is the exception handler block.
      BasicBlock* success = block->SuccessorAt(0);
      BasicBlock* exception = block->SuccessorAt(1);
      // Emit the call (wired to the handler) and then the fall-through jump.
      return VisitCall(input, exception), VisitGoto(success);
    }
    case BasicBlock::kTailCall: {
      DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
      return VisitTailCall(input);
    }
    case BasicBlock::kBranch: {
      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // Both arms identical: the condition is irrelevant, emit a plain jump.
      if (tbranch == fbranch) return VisitGoto(tbranch);
      return VisitBranch(input, tbranch, fbranch);
    }
    case BasicBlock::kSwitch: {
      DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
      SwitchInfo sw;
      // Last successor must be Default.
      sw.default_branch = block->successors().back();
      DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
      // All other successors must be cases.
      sw.case_count = block->SuccessorCount() - 1;
      sw.case_branches = &block->successors().front();
      // Determine case values and their min/max.
      sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
      sw.min_value = std::numeric_limits<int32_t>::max();
      sw.max_value = std::numeric_limits<int32_t>::min();
      for (size_t index = 0; index < sw.case_count; ++index) {
        BasicBlock* branch = sw.case_branches[index];
        // Each case block starts with an IfValue node carrying its constant.
        int32_t value = OpParameter<int32_t>(branch->front()->op());
        sw.case_values[index] = value;
        if (sw.min_value > value) sw.min_value = value;
        if (sw.max_value < value) sw.max_value = value;
      }
      DCHECK_LE(sw.min_value, sw.max_value);
      // Note that {value_range} can be 0 if {min_value} is -2^31 and
      // {max_value} is 2^31-1, so don't assume that it's non-zero below.
      // Unsigned arithmetic so the wrap-around is well defined.
      sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
                       bit_cast<uint32_t>(sw.min_value);
      return VisitSwitch(input, sw);
    }
    case BasicBlock::kReturn: {
      DCHECK_EQ(IrOpcode::kReturn, input->opcode());
      return VisitReturn(input);
    }
    case BasicBlock::kDeoptimize: {
      DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
      Node* value = input->InputAt(0);
      return VisitDeoptimize(p.kind(), p.reason(), value);
    }
    case BasicBlock::kThrow:
      DCHECK_EQ(IrOpcode::kThrow, input->opcode());
      return VisitThrow(input->InputAt(0));
    case BasicBlock::kNone: {
      // Exit block doesn't have control.
      DCHECK_NULL(input);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
942
MarkPairProjectionsAsWord32(Node * node)943 void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
944 Node* projection0 = NodeProperties::FindProjection(node, 0);
945 if (projection0) {
946 MarkAsWord32(projection0);
947 }
948 Node* projection1 = NodeProperties::FindProjection(node, 1);
949 if (projection1) {
950 MarkAsWord32(projection1);
951 }
952 }
953
VisitNode(Node * node)954 void InstructionSelector::VisitNode(Node* node) {
955 DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
956 switch (node->opcode()) {
957 case IrOpcode::kStart:
958 case IrOpcode::kLoop:
959 case IrOpcode::kEnd:
960 case IrOpcode::kBranch:
961 case IrOpcode::kIfTrue:
962 case IrOpcode::kIfFalse:
963 case IrOpcode::kIfSuccess:
964 case IrOpcode::kSwitch:
965 case IrOpcode::kIfValue:
966 case IrOpcode::kIfDefault:
967 case IrOpcode::kEffectPhi:
968 case IrOpcode::kMerge:
969 case IrOpcode::kTerminate:
970 case IrOpcode::kBeginRegion:
971 // No code needed for these graph artifacts.
972 return;
973 case IrOpcode::kIfException:
974 return MarkAsReference(node), VisitIfException(node);
975 case IrOpcode::kFinishRegion:
976 return MarkAsReference(node), VisitFinishRegion(node);
977 case IrOpcode::kParameter: {
978 MachineType type =
979 linkage()->GetParameterType(ParameterIndexOf(node->op()));
980 MarkAsRepresentation(type.representation(), node);
981 return VisitParameter(node);
982 }
983 case IrOpcode::kOsrValue:
984 return MarkAsReference(node), VisitOsrValue(node);
985 case IrOpcode::kPhi: {
986 MachineRepresentation rep = PhiRepresentationOf(node->op());
987 if (rep == MachineRepresentation::kNone) return;
988 MarkAsRepresentation(rep, node);
989 return VisitPhi(node);
990 }
991 case IrOpcode::kProjection:
992 return VisitProjection(node);
993 case IrOpcode::kInt32Constant:
994 case IrOpcode::kInt64Constant:
995 case IrOpcode::kExternalConstant:
996 case IrOpcode::kRelocatableInt32Constant:
997 case IrOpcode::kRelocatableInt64Constant:
998 return VisitConstant(node);
999 case IrOpcode::kFloat32Constant:
1000 return MarkAsFloat32(node), VisitConstant(node);
1001 case IrOpcode::kFloat64Constant:
1002 return MarkAsFloat64(node), VisitConstant(node);
1003 case IrOpcode::kHeapConstant:
1004 return MarkAsReference(node), VisitConstant(node);
1005 case IrOpcode::kNumberConstant: {
1006 double value = OpParameter<double>(node);
1007 if (!IsSmiDouble(value)) MarkAsReference(node);
1008 return VisitConstant(node);
1009 }
1010 case IrOpcode::kCall:
1011 return VisitCall(node);
1012 case IrOpcode::kDeoptimizeIf:
1013 return VisitDeoptimizeIf(node);
1014 case IrOpcode::kDeoptimizeUnless:
1015 return VisitDeoptimizeUnless(node);
1016 case IrOpcode::kFrameState:
1017 case IrOpcode::kStateValues:
1018 case IrOpcode::kObjectState:
1019 return;
1020 case IrOpcode::kDebugBreak:
1021 VisitDebugBreak(node);
1022 return;
1023 case IrOpcode::kComment:
1024 VisitComment(node);
1025 return;
1026 case IrOpcode::kRetain:
1027 VisitRetain(node);
1028 return;
1029 case IrOpcode::kLoad: {
1030 LoadRepresentation type = LoadRepresentationOf(node->op());
1031 MarkAsRepresentation(type.representation(), node);
1032 return VisitLoad(node);
1033 }
1034 case IrOpcode::kStore:
1035 return VisitStore(node);
1036 case IrOpcode::kWord32And:
1037 return MarkAsWord32(node), VisitWord32And(node);
1038 case IrOpcode::kWord32Or:
1039 return MarkAsWord32(node), VisitWord32Or(node);
1040 case IrOpcode::kWord32Xor:
1041 return MarkAsWord32(node), VisitWord32Xor(node);
1042 case IrOpcode::kWord32Shl:
1043 return MarkAsWord32(node), VisitWord32Shl(node);
1044 case IrOpcode::kWord32Shr:
1045 return MarkAsWord32(node), VisitWord32Shr(node);
1046 case IrOpcode::kWord32Sar:
1047 return MarkAsWord32(node), VisitWord32Sar(node);
1048 case IrOpcode::kWord32Ror:
1049 return MarkAsWord32(node), VisitWord32Ror(node);
1050 case IrOpcode::kWord32Equal:
1051 return VisitWord32Equal(node);
1052 case IrOpcode::kWord32Clz:
1053 return MarkAsWord32(node), VisitWord32Clz(node);
1054 case IrOpcode::kWord32Ctz:
1055 return MarkAsWord32(node), VisitWord32Ctz(node);
1056 case IrOpcode::kWord32ReverseBits:
1057 return MarkAsWord32(node), VisitWord32ReverseBits(node);
1058 case IrOpcode::kWord32ReverseBytes:
1059 return MarkAsWord32(node), VisitWord32ReverseBytes(node);
1060 case IrOpcode::kWord32Popcnt:
1061 return MarkAsWord32(node), VisitWord32Popcnt(node);
1062 case IrOpcode::kWord64Popcnt:
1063 return MarkAsWord32(node), VisitWord64Popcnt(node);
1064 case IrOpcode::kWord64And:
1065 return MarkAsWord64(node), VisitWord64And(node);
1066 case IrOpcode::kWord64Or:
1067 return MarkAsWord64(node), VisitWord64Or(node);
1068 case IrOpcode::kWord64Xor:
1069 return MarkAsWord64(node), VisitWord64Xor(node);
1070 case IrOpcode::kWord64Shl:
1071 return MarkAsWord64(node), VisitWord64Shl(node);
1072 case IrOpcode::kWord64Shr:
1073 return MarkAsWord64(node), VisitWord64Shr(node);
1074 case IrOpcode::kWord64Sar:
1075 return MarkAsWord64(node), VisitWord64Sar(node);
1076 case IrOpcode::kWord64Ror:
1077 return MarkAsWord64(node), VisitWord64Ror(node);
1078 case IrOpcode::kWord64Clz:
1079 return MarkAsWord64(node), VisitWord64Clz(node);
1080 case IrOpcode::kWord64Ctz:
1081 return MarkAsWord64(node), VisitWord64Ctz(node);
1082 case IrOpcode::kWord64ReverseBits:
1083 return MarkAsWord64(node), VisitWord64ReverseBits(node);
1084 case IrOpcode::kWord64ReverseBytes:
1085 return MarkAsWord64(node), VisitWord64ReverseBytes(node);
1086 case IrOpcode::kWord64Equal:
1087 return VisitWord64Equal(node);
1088 case IrOpcode::kInt32Add:
1089 return MarkAsWord32(node), VisitInt32Add(node);
1090 case IrOpcode::kInt32AddWithOverflow:
1091 return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
1092 case IrOpcode::kInt32Sub:
1093 return MarkAsWord32(node), VisitInt32Sub(node);
1094 case IrOpcode::kInt32SubWithOverflow:
1095 return VisitInt32SubWithOverflow(node);
1096 case IrOpcode::kInt32Mul:
1097 return MarkAsWord32(node), VisitInt32Mul(node);
1098 case IrOpcode::kInt32MulWithOverflow:
1099 return MarkAsWord32(node), VisitInt32MulWithOverflow(node);
1100 case IrOpcode::kInt32MulHigh:
1101 return VisitInt32MulHigh(node);
1102 case IrOpcode::kInt32Div:
1103 return MarkAsWord32(node), VisitInt32Div(node);
1104 case IrOpcode::kInt32Mod:
1105 return MarkAsWord32(node), VisitInt32Mod(node);
1106 case IrOpcode::kInt32LessThan:
1107 return VisitInt32LessThan(node);
1108 case IrOpcode::kInt32LessThanOrEqual:
1109 return VisitInt32LessThanOrEqual(node);
1110 case IrOpcode::kUint32Div:
1111 return MarkAsWord32(node), VisitUint32Div(node);
1112 case IrOpcode::kUint32LessThan:
1113 return VisitUint32LessThan(node);
1114 case IrOpcode::kUint32LessThanOrEqual:
1115 return VisitUint32LessThanOrEqual(node);
1116 case IrOpcode::kUint32Mod:
1117 return MarkAsWord32(node), VisitUint32Mod(node);
1118 case IrOpcode::kUint32MulHigh:
1119 return VisitUint32MulHigh(node);
1120 case IrOpcode::kInt64Add:
1121 return MarkAsWord64(node), VisitInt64Add(node);
1122 case IrOpcode::kInt64AddWithOverflow:
1123 return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
1124 case IrOpcode::kInt64Sub:
1125 return MarkAsWord64(node), VisitInt64Sub(node);
1126 case IrOpcode::kInt64SubWithOverflow:
1127 return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
1128 case IrOpcode::kInt64Mul:
1129 return MarkAsWord64(node), VisitInt64Mul(node);
1130 case IrOpcode::kInt64Div:
1131 return MarkAsWord64(node), VisitInt64Div(node);
1132 case IrOpcode::kInt64Mod:
1133 return MarkAsWord64(node), VisitInt64Mod(node);
1134 case IrOpcode::kInt64LessThan:
1135 return VisitInt64LessThan(node);
1136 case IrOpcode::kInt64LessThanOrEqual:
1137 return VisitInt64LessThanOrEqual(node);
1138 case IrOpcode::kUint64Div:
1139 return MarkAsWord64(node), VisitUint64Div(node);
1140 case IrOpcode::kUint64LessThan:
1141 return VisitUint64LessThan(node);
1142 case IrOpcode::kUint64LessThanOrEqual:
1143 return VisitUint64LessThanOrEqual(node);
1144 case IrOpcode::kUint64Mod:
1145 return MarkAsWord64(node), VisitUint64Mod(node);
1146 case IrOpcode::kBitcastTaggedToWord:
1147 return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
1148 VisitBitcastTaggedToWord(node);
1149 case IrOpcode::kBitcastWordToTagged:
1150 return MarkAsReference(node), VisitBitcastWordToTagged(node);
1151 case IrOpcode::kBitcastWordToTaggedSigned:
1152 return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
1153 EmitIdentity(node);
1154 case IrOpcode::kChangeFloat32ToFloat64:
1155 return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
1156 case IrOpcode::kChangeInt32ToFloat64:
1157 return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
1158 case IrOpcode::kChangeUint32ToFloat64:
1159 return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
1160 case IrOpcode::kChangeFloat64ToInt32:
1161 return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
1162 case IrOpcode::kChangeFloat64ToUint32:
1163 return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
1164 case IrOpcode::kFloat64SilenceNaN:
1165 MarkAsFloat64(node);
1166 if (CanProduceSignalingNaN(node->InputAt(0))) {
1167 return VisitFloat64SilenceNaN(node);
1168 } else {
1169 return EmitIdentity(node);
1170 }
1171 case IrOpcode::kTruncateFloat64ToUint32:
1172 return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
1173 case IrOpcode::kTruncateFloat32ToInt32:
1174 return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
1175 case IrOpcode::kTruncateFloat32ToUint32:
1176 return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node);
1177 case IrOpcode::kTryTruncateFloat32ToInt64:
1178 return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
1179 case IrOpcode::kTryTruncateFloat64ToInt64:
1180 return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
1181 case IrOpcode::kTryTruncateFloat32ToUint64:
1182 return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
1183 case IrOpcode::kTryTruncateFloat64ToUint64:
1184 return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
1185 case IrOpcode::kChangeInt32ToInt64:
1186 return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
1187 case IrOpcode::kChangeUint32ToUint64:
1188 return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
1189 case IrOpcode::kTruncateFloat64ToFloat32:
1190 return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
1191 case IrOpcode::kTruncateFloat64ToWord32:
1192 return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node);
1193 case IrOpcode::kTruncateInt64ToInt32:
1194 return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
1195 case IrOpcode::kRoundFloat64ToInt32:
1196 return MarkAsWord32(node), VisitRoundFloat64ToInt32(node);
1197 case IrOpcode::kRoundInt64ToFloat32:
1198 return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
1199 case IrOpcode::kRoundInt32ToFloat32:
1200 return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node);
1201 case IrOpcode::kRoundInt64ToFloat64:
1202 return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
1203 case IrOpcode::kBitcastFloat32ToInt32:
1204 return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
1205 case IrOpcode::kRoundUint32ToFloat32:
1206 return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node);
1207 case IrOpcode::kRoundUint64ToFloat32:
1208 return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
1209 case IrOpcode::kRoundUint64ToFloat64:
1210 return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
1211 case IrOpcode::kBitcastFloat64ToInt64:
1212 return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
1213 case IrOpcode::kBitcastInt32ToFloat32:
1214 return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
1215 case IrOpcode::kBitcastInt64ToFloat64:
1216 return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
1217 case IrOpcode::kFloat32Add:
1218 return MarkAsFloat32(node), VisitFloat32Add(node);
1219 case IrOpcode::kFloat32Sub:
1220 return MarkAsFloat32(node), VisitFloat32Sub(node);
1221 case IrOpcode::kFloat32Neg:
1222 return MarkAsFloat32(node), VisitFloat32Neg(node);
1223 case IrOpcode::kFloat32Mul:
1224 return MarkAsFloat32(node), VisitFloat32Mul(node);
1225 case IrOpcode::kFloat32Div:
1226 return MarkAsFloat32(node), VisitFloat32Div(node);
1227 case IrOpcode::kFloat32Abs:
1228 return MarkAsFloat32(node), VisitFloat32Abs(node);
1229 case IrOpcode::kFloat32Sqrt:
1230 return MarkAsFloat32(node), VisitFloat32Sqrt(node);
1231 case IrOpcode::kFloat32Equal:
1232 return VisitFloat32Equal(node);
1233 case IrOpcode::kFloat32LessThan:
1234 return VisitFloat32LessThan(node);
1235 case IrOpcode::kFloat32LessThanOrEqual:
1236 return VisitFloat32LessThanOrEqual(node);
1237 case IrOpcode::kFloat32Max:
1238 return MarkAsFloat32(node), VisitFloat32Max(node);
1239 case IrOpcode::kFloat32Min:
1240 return MarkAsFloat32(node), VisitFloat32Min(node);
1241 case IrOpcode::kFloat64Add:
1242 return MarkAsFloat64(node), VisitFloat64Add(node);
1243 case IrOpcode::kFloat64Sub:
1244 return MarkAsFloat64(node), VisitFloat64Sub(node);
1245 case IrOpcode::kFloat64Neg:
1246 return MarkAsFloat64(node), VisitFloat64Neg(node);
1247 case IrOpcode::kFloat64Mul:
1248 return MarkAsFloat64(node), VisitFloat64Mul(node);
1249 case IrOpcode::kFloat64Div:
1250 return MarkAsFloat64(node), VisitFloat64Div(node);
1251 case IrOpcode::kFloat64Mod:
1252 return MarkAsFloat64(node), VisitFloat64Mod(node);
1253 case IrOpcode::kFloat64Min:
1254 return MarkAsFloat64(node), VisitFloat64Min(node);
1255 case IrOpcode::kFloat64Max:
1256 return MarkAsFloat64(node), VisitFloat64Max(node);
1257 case IrOpcode::kFloat64Abs:
1258 return MarkAsFloat64(node), VisitFloat64Abs(node);
1259 case IrOpcode::kFloat64Acos:
1260 return MarkAsFloat64(node), VisitFloat64Acos(node);
1261 case IrOpcode::kFloat64Acosh:
1262 return MarkAsFloat64(node), VisitFloat64Acosh(node);
1263 case IrOpcode::kFloat64Asin:
1264 return MarkAsFloat64(node), VisitFloat64Asin(node);
1265 case IrOpcode::kFloat64Asinh:
1266 return MarkAsFloat64(node), VisitFloat64Asinh(node);
1267 case IrOpcode::kFloat64Atan:
1268 return MarkAsFloat64(node), VisitFloat64Atan(node);
1269 case IrOpcode::kFloat64Atanh:
1270 return MarkAsFloat64(node), VisitFloat64Atanh(node);
1271 case IrOpcode::kFloat64Atan2:
1272 return MarkAsFloat64(node), VisitFloat64Atan2(node);
1273 case IrOpcode::kFloat64Cbrt:
1274 return MarkAsFloat64(node), VisitFloat64Cbrt(node);
1275 case IrOpcode::kFloat64Cos:
1276 return MarkAsFloat64(node), VisitFloat64Cos(node);
1277 case IrOpcode::kFloat64Cosh:
1278 return MarkAsFloat64(node), VisitFloat64Cosh(node);
1279 case IrOpcode::kFloat64Exp:
1280 return MarkAsFloat64(node), VisitFloat64Exp(node);
1281 case IrOpcode::kFloat64Expm1:
1282 return MarkAsFloat64(node), VisitFloat64Expm1(node);
1283 case IrOpcode::kFloat64Log:
1284 return MarkAsFloat64(node), VisitFloat64Log(node);
1285 case IrOpcode::kFloat64Log1p:
1286 return MarkAsFloat64(node), VisitFloat64Log1p(node);
1287 case IrOpcode::kFloat64Log10:
1288 return MarkAsFloat64(node), VisitFloat64Log10(node);
1289 case IrOpcode::kFloat64Log2:
1290 return MarkAsFloat64(node), VisitFloat64Log2(node);
1291 case IrOpcode::kFloat64Pow:
1292 return MarkAsFloat64(node), VisitFloat64Pow(node);
1293 case IrOpcode::kFloat64Sin:
1294 return MarkAsFloat64(node), VisitFloat64Sin(node);
1295 case IrOpcode::kFloat64Sinh:
1296 return MarkAsFloat64(node), VisitFloat64Sinh(node);
1297 case IrOpcode::kFloat64Sqrt:
1298 return MarkAsFloat64(node), VisitFloat64Sqrt(node);
1299 case IrOpcode::kFloat64Tan:
1300 return MarkAsFloat64(node), VisitFloat64Tan(node);
1301 case IrOpcode::kFloat64Tanh:
1302 return MarkAsFloat64(node), VisitFloat64Tanh(node);
1303 case IrOpcode::kFloat64Equal:
1304 return VisitFloat64Equal(node);
1305 case IrOpcode::kFloat64LessThan:
1306 return VisitFloat64LessThan(node);
1307 case IrOpcode::kFloat64LessThanOrEqual:
1308 return VisitFloat64LessThanOrEqual(node);
1309 case IrOpcode::kFloat32RoundDown:
1310 return MarkAsFloat32(node), VisitFloat32RoundDown(node);
1311 case IrOpcode::kFloat64RoundDown:
1312 return MarkAsFloat64(node), VisitFloat64RoundDown(node);
1313 case IrOpcode::kFloat32RoundUp:
1314 return MarkAsFloat32(node), VisitFloat32RoundUp(node);
1315 case IrOpcode::kFloat64RoundUp:
1316 return MarkAsFloat64(node), VisitFloat64RoundUp(node);
1317 case IrOpcode::kFloat32RoundTruncate:
1318 return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
1319 case IrOpcode::kFloat64RoundTruncate:
1320 return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
1321 case IrOpcode::kFloat64RoundTiesAway:
1322 return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
1323 case IrOpcode::kFloat32RoundTiesEven:
1324 return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
1325 case IrOpcode::kFloat64RoundTiesEven:
1326 return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
1327 case IrOpcode::kFloat64ExtractLowWord32:
1328 return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
1329 case IrOpcode::kFloat64ExtractHighWord32:
1330 return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
1331 case IrOpcode::kFloat64InsertLowWord32:
1332 return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
1333 case IrOpcode::kFloat64InsertHighWord32:
1334 return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
1335 case IrOpcode::kStackSlot:
1336 return VisitStackSlot(node);
1337 case IrOpcode::kLoadStackPointer:
1338 return VisitLoadStackPointer(node);
1339 case IrOpcode::kLoadFramePointer:
1340 return VisitLoadFramePointer(node);
1341 case IrOpcode::kLoadParentFramePointer:
1342 return VisitLoadParentFramePointer(node);
1343 case IrOpcode::kUnalignedLoad: {
1344 UnalignedLoadRepresentation type =
1345 UnalignedLoadRepresentationOf(node->op());
1346 MarkAsRepresentation(type.representation(), node);
1347 return VisitUnalignedLoad(node);
1348 }
1349 case IrOpcode::kUnalignedStore:
1350 return VisitUnalignedStore(node);
1351 case IrOpcode::kCheckedLoad: {
1352 MachineRepresentation rep =
1353 CheckedLoadRepresentationOf(node->op()).representation();
1354 MarkAsRepresentation(rep, node);
1355 return VisitCheckedLoad(node);
1356 }
1357 case IrOpcode::kCheckedStore:
1358 return VisitCheckedStore(node);
1359 case IrOpcode::kInt32PairAdd:
1360 MarkAsWord32(node);
1361 MarkPairProjectionsAsWord32(node);
1362 return VisitInt32PairAdd(node);
1363 case IrOpcode::kInt32PairSub:
1364 MarkAsWord32(node);
1365 MarkPairProjectionsAsWord32(node);
1366 return VisitInt32PairSub(node);
1367 case IrOpcode::kInt32PairMul:
1368 MarkAsWord32(node);
1369 MarkPairProjectionsAsWord32(node);
1370 return VisitInt32PairMul(node);
1371 case IrOpcode::kWord32PairShl:
1372 MarkAsWord32(node);
1373 MarkPairProjectionsAsWord32(node);
1374 return VisitWord32PairShl(node);
1375 case IrOpcode::kWord32PairShr:
1376 MarkAsWord32(node);
1377 MarkPairProjectionsAsWord32(node);
1378 return VisitWord32PairShr(node);
1379 case IrOpcode::kWord32PairSar:
1380 MarkAsWord32(node);
1381 MarkPairProjectionsAsWord32(node);
1382 return VisitWord32PairSar(node);
1383 case IrOpcode::kAtomicLoad: {
1384 LoadRepresentation type = LoadRepresentationOf(node->op());
1385 MarkAsRepresentation(type.representation(), node);
1386 return VisitAtomicLoad(node);
1387 }
1388 case IrOpcode::kAtomicStore:
1389 return VisitAtomicStore(node);
1390 case IrOpcode::kProtectedLoad:
1391 return VisitProtectedLoad(node);
1392 case IrOpcode::kUnsafePointerAdd:
1393 MarkAsRepresentation(MachineType::PointerRepresentation(), node);
1394 return VisitUnsafePointerAdd(node);
1395 case IrOpcode::kCreateInt32x4:
1396 return MarkAsSimd128(node), VisitCreateInt32x4(node);
1397 case IrOpcode::kInt32x4ExtractLane:
1398 return MarkAsWord32(node), VisitInt32x4ExtractLane(node);
1399 default:
1400 V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
1401 node->opcode(), node->op()->mnemonic(), node->id());
1402 break;
1403 }
1404 }
1405
VisitLoadStackPointer(Node * node)1406 void InstructionSelector::VisitLoadStackPointer(Node* node) {
1407 OperandGenerator g(this);
1408 Emit(kArchStackPointer, g.DefineAsRegister(node));
1409 }
1410
VisitLoadFramePointer(Node * node)1411 void InstructionSelector::VisitLoadFramePointer(Node* node) {
1412 OperandGenerator g(this);
1413 Emit(kArchFramePointer, g.DefineAsRegister(node));
1414 }
1415
VisitLoadParentFramePointer(Node * node)1416 void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
1417 OperandGenerator g(this);
1418 Emit(kArchParentFramePointer, g.DefineAsRegister(node));
1419 }
1420
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Acos).
void InstructionSelector::VisitFloat64Acos(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
}
1424
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Acosh).
void InstructionSelector::VisitFloat64Acosh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Acosh);
}
1428
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Asin).
void InstructionSelector::VisitFloat64Asin(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Asin);
}
1432
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Asinh).
void InstructionSelector::VisitFloat64Asinh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Asinh);
}
1436
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Atan).
void InstructionSelector::VisitFloat64Atan(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
}
1440
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Atanh).
void InstructionSelector::VisitFloat64Atanh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
}
1444
// Lowered via the shared IEEE-754 binary-op path (kIeee754Float64Atan2).
void InstructionSelector::VisitFloat64Atan2(Node* node) {
  VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
}
1448
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Cbrt).
void InstructionSelector::VisitFloat64Cbrt(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
}
1452
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Cos).
void InstructionSelector::VisitFloat64Cos(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
}
1456
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Cosh).
void InstructionSelector::VisitFloat64Cosh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Cosh);
}
1460
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Exp).
void InstructionSelector::VisitFloat64Exp(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
}
1464
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Expm1).
void InstructionSelector::VisitFloat64Expm1(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1);
}
1468
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Log).
void InstructionSelector::VisitFloat64Log(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log);
}
1472
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Log1p).
void InstructionSelector::VisitFloat64Log1p(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p);
}
1476
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Log2).
void InstructionSelector::VisitFloat64Log2(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log2);
}
1480
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Log10).
void InstructionSelector::VisitFloat64Log10(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
}
1484
// Lowered via the shared IEEE-754 binary-op path (kIeee754Float64Pow).
void InstructionSelector::VisitFloat64Pow(Node* node) {
  VisitFloat64Ieee754Binop(node, kIeee754Float64Pow);
}
1488
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Sin).
void InstructionSelector::VisitFloat64Sin(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
}
1492
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Sinh).
void InstructionSelector::VisitFloat64Sinh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Sinh);
}
1496
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Tan).
void InstructionSelector::VisitFloat64Tan(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
}
1500
// Lowered via the shared IEEE-754 unary-op path (kIeee754Float64Tanh).
void InstructionSelector::VisitFloat64Tanh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
}
1504
EmitTableSwitch(const SwitchInfo & sw,InstructionOperand & index_operand)1505 void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
1506 InstructionOperand& index_operand) {
1507 OperandGenerator g(this);
1508 size_t input_count = 2 + sw.value_range;
1509 auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
1510 inputs[0] = index_operand;
1511 InstructionOperand default_operand = g.Label(sw.default_branch);
1512 std::fill(&inputs[1], &inputs[input_count], default_operand);
1513 for (size_t index = 0; index < sw.case_count; ++index) {
1514 size_t value = sw.case_values[index] - sw.min_value;
1515 BasicBlock* branch = sw.case_branches[index];
1516 DCHECK_LE(0u, value);
1517 DCHECK_LT(value + 2, input_count);
1518 inputs[value + 2] = g.Label(branch);
1519 }
1520 Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
1521 }
1522
1523
EmitLookupSwitch(const SwitchInfo & sw,InstructionOperand & value_operand)1524 void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
1525 InstructionOperand& value_operand) {
1526 OperandGenerator g(this);
1527 size_t input_count = 2 + sw.case_count * 2;
1528 auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
1529 inputs[0] = value_operand;
1530 inputs[1] = g.Label(sw.default_branch);
1531 for (size_t index = 0; index < sw.case_count; ++index) {
1532 int32_t value = sw.case_values[index];
1533 BasicBlock* branch = sw.case_branches[index];
1534 inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
1535 inputs[index * 2 + 2 + 1] = g.Label(branch);
1536 }
1537 Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
1538 }
1539
VisitStackSlot(Node * node)1540 void InstructionSelector::VisitStackSlot(Node* node) {
1541 int size = 1 << ElementSizeLog2Of(StackSlotRepresentationOf(node->op()));
1542 int slot = frame_->AllocateSpillSlot(size);
1543 OperandGenerator g(this);
1544
1545 Emit(kArchStackSlot, g.DefineAsRegister(node),
1546 sequence()->AddImmediate(Constant(slot)), 0, nullptr);
1547 }
1548
VisitBitcastTaggedToWord(Node * node)1549 void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
1550 OperandGenerator g(this);
1551 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
1552 }
1553
VisitBitcastWordToTagged(Node * node)1554 void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
1555 OperandGenerator g(this);
1556 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
1557 }
1558
1559 // 32 bit targets do not implement the following instructions.
1560 #if V8_TARGET_ARCH_32_BIT
1561
// Not available on 32-bit targets.
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
1563
1564
// Not available on 32-bit targets.
void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
1566
1567
// Not available on 32-bit targets.
void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
1569
1570
// Not available on 32-bit targets.
void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
1572
1573
// Not available on 32-bit targets.
void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
1575
1576
// Not available on 32-bit targets.
void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
1578
1579
VisitWord64Ror(Node * node)1580 void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
1581
1582
VisitWord64Clz(Node * node)1583 void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
1584
1585
VisitWord64Ctz(Node * node)1586 void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
1587
1588
VisitWord64ReverseBits(Node * node)1589 void InstructionSelector::VisitWord64ReverseBits(Node* node) {
1590 UNIMPLEMENTED();
1591 }
1592
1593
VisitWord64Popcnt(Node * node)1594 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
1595
1596
VisitWord64Equal(Node * node)1597 void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
1598
1599
VisitInt64Add(Node * node)1600 void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
1601
1602
VisitInt64AddWithOverflow(Node * node)1603 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
1604 UNIMPLEMENTED();
1605 }
1606
1607
VisitInt64Sub(Node * node)1608 void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
1609
1610
VisitInt64SubWithOverflow(Node * node)1611 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
1612 UNIMPLEMENTED();
1613 }
1614
VisitInt64Mul(Node * node)1615 void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
1616
1617
VisitInt64Div(Node * node)1618 void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
1619
1620
VisitInt64LessThan(Node * node)1621 void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
1622
1623
VisitInt64LessThanOrEqual(Node * node)1624 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
1625 UNIMPLEMENTED();
1626 }
1627
1628
VisitUint64Div(Node * node)1629 void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
1630
1631
VisitInt64Mod(Node * node)1632 void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
1633
1634
VisitUint64LessThan(Node * node)1635 void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
1636
1637
VisitUint64LessThanOrEqual(Node * node)1638 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
1639 UNIMPLEMENTED();
1640 }
1641
1642
VisitUint64Mod(Node * node)1643 void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
1644
1645
VisitChangeInt32ToInt64(Node * node)1646 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
1647 UNIMPLEMENTED();
1648 }
1649
1650
VisitChangeUint32ToUint64(Node * node)1651 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
1652 UNIMPLEMENTED();
1653 }
1654
1655
VisitTryTruncateFloat32ToInt64(Node * node)1656 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1657 UNIMPLEMENTED();
1658 }
1659
1660
VisitTryTruncateFloat64ToInt64(Node * node)1661 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1662 UNIMPLEMENTED();
1663 }
1664
1665
VisitTryTruncateFloat32ToUint64(Node * node)1666 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1667 UNIMPLEMENTED();
1668 }
1669
1670
VisitTryTruncateFloat64ToUint64(Node * node)1671 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1672 UNIMPLEMENTED();
1673 }
1674
1675
VisitTruncateInt64ToInt32(Node * node)1676 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
1677 UNIMPLEMENTED();
1678 }
1679
1680
VisitRoundInt64ToFloat32(Node * node)1681 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
1682 UNIMPLEMENTED();
1683 }
1684
1685
VisitRoundInt64ToFloat64(Node * node)1686 void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
1687 UNIMPLEMENTED();
1688 }
1689
1690
VisitRoundUint64ToFloat32(Node * node)1691 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
1692 UNIMPLEMENTED();
1693 }
1694
1695
VisitRoundUint64ToFloat64(Node * node)1696 void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
1697 UNIMPLEMENTED();
1698 }
1699
1700
VisitBitcastFloat64ToInt64(Node * node)1701 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
1702 UNIMPLEMENTED();
1703 }
1704
1705
VisitBitcastInt64ToFloat64(Node * node)1706 void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
1707 UNIMPLEMENTED();
1708 }
1709 #endif // V8_TARGET_ARCH_32_BIT
1710
// 64 bit targets do not implement the following instructions.
// The Int32Pair/Word32Pair operators exist only for 32-bit targets, where a
// 64-bit value is represented as a pair of 32-bit words.
#if V8_TARGET_ARCH_64_BIT
void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32PairMul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif  // V8_TARGET_ARCH_64_BIT
1725
// These SIMD operators are guarded out on every backend except x64 here, so
// presumably only the x64 instruction selector implements them at this point.
#if !V8_TARGET_ARCH_X64
void InstructionSelector::VisitCreateInt32x4(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
  UNIMPLEMENTED();
}
#endif  // !V8_TARGET_ARCH_X64
1733
VisitFinishRegion(Node * node)1734 void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
1735
VisitParameter(Node * node)1736 void InstructionSelector::VisitParameter(Node* node) {
1737 OperandGenerator g(this);
1738 int index = ParameterIndexOf(node->op());
1739 InstructionOperand op =
1740 linkage()->ParameterHasSecondaryLocation(index)
1741 ? g.DefineAsDualLocation(
1742 node, linkage()->GetParameterLocation(index),
1743 linkage()->GetParameterSecondaryLocation(index))
1744 : g.DefineAsLocation(node, linkage()->GetParameterLocation(index));
1745
1746 Emit(kArchNop, op);
1747 }
1748
namespace {

// Returns the fixed location in which a caught exception object arrives:
// the first return register, treated as a pointer-sized value.
LinkageLocation ExceptionLocation() {
  return LinkageLocation::ForRegister(kReturnRegister0.code(),
                                      MachineType::IntPtr());
}

}  // namespace
1755
// An IfException projection defines the exception value, which arrives in
// the fixed exception location (see ExceptionLocation above).
void InstructionSelector::VisitIfException(Node* node) {
  OperandGenerator g(this);
  // IfException must hang off a call node (input 1 is the control input).
  DCHECK_EQ(IrOpcode::kCall, node->InputAt(1)->opcode());
  Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation()));
}
1761
1762
VisitOsrValue(Node * node)1763 void InstructionSelector::VisitOsrValue(Node* node) {
1764 OperandGenerator g(this);
1765 int index = OsrValueIndexOf(node->op());
1766 Emit(kArchNop,
1767 g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
1768 }
1769
1770
VisitPhi(Node * node)1771 void InstructionSelector::VisitPhi(Node* node) {
1772 const int input_count = node->op()->ValueInputCount();
1773 PhiInstruction* phi = new (instruction_zone())
1774 PhiInstruction(instruction_zone(), GetVirtualRegister(node),
1775 static_cast<size_t>(input_count));
1776 sequence()
1777 ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
1778 ->AddPhi(phi);
1779 for (int i = 0; i < input_count; ++i) {
1780 Node* const input = node->InputAt(i);
1781 MarkAsUsed(input);
1782 phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
1783 }
1784 }
1785
1786
VisitProjection(Node * node)1787 void InstructionSelector::VisitProjection(Node* node) {
1788 OperandGenerator g(this);
1789 Node* value = node->InputAt(0);
1790 switch (value->opcode()) {
1791 case IrOpcode::kInt32AddWithOverflow:
1792 case IrOpcode::kInt32SubWithOverflow:
1793 case IrOpcode::kInt32MulWithOverflow:
1794 case IrOpcode::kInt64AddWithOverflow:
1795 case IrOpcode::kInt64SubWithOverflow:
1796 case IrOpcode::kTryTruncateFloat32ToInt64:
1797 case IrOpcode::kTryTruncateFloat64ToInt64:
1798 case IrOpcode::kTryTruncateFloat32ToUint64:
1799 case IrOpcode::kTryTruncateFloat64ToUint64:
1800 case IrOpcode::kInt32PairAdd:
1801 case IrOpcode::kInt32PairSub:
1802 case IrOpcode::kInt32PairMul:
1803 case IrOpcode::kWord32PairShl:
1804 case IrOpcode::kWord32PairShr:
1805 case IrOpcode::kWord32PairSar:
1806 if (ProjectionIndexOf(node->op()) == 0u) {
1807 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
1808 } else {
1809 DCHECK(ProjectionIndexOf(node->op()) == 1u);
1810 MarkAsUsed(value);
1811 }
1812 break;
1813 default:
1814 break;
1815 }
1816 }
1817
1818
VisitConstant(Node * node)1819 void InstructionSelector::VisitConstant(Node* node) {
1820 // We must emit a NOP here because every live range needs a defining
1821 // instruction in the register allocator.
1822 OperandGenerator g(this);
1823 Emit(kArchNop, g.DefineAsConstant(node));
1824 }
1825
1826
// Lowers a call node to the matching kArchCall* instruction: gathers the
// call's inputs/outputs into a CallBuffer, attaches an optional frame state,
// pushes stack arguments, and (when |handler| is non-null) records the
// exception handler's label as an extra input.
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
  OperandGenerator g(this);
  const CallDescriptor* descriptor = CallDescriptorOf(node->op());

  // The frame state, when required, is the input immediately after the
  // call's declared inputs.
  FrameStateDescriptor* frame_state_descriptor = nullptr;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor = GetFrameStateDescriptor(
        node->InputAt(static_cast<int>(descriptor->InputCount())));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on some architectures it's probably better to use
  // the code object in a register if there are multiple uses of it.
  // Improve constant pool and the heuristics in the register allocator
  // for where to emit constants.
  CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
  InitializeCallBuffer(node, &buffer, call_buffer_flags);

  EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);

  // Pass label of exception handler block.
  CallDescriptor::Flags flags = descriptor->flags();
  if (handler) {
    DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
    flags |= CallDescriptor::kHasExceptionHandler;
    buffer.instruction_args.push_back(g.Label(handler));
  }

  bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
  bool to_native_stack = descriptor->UseNativeStack();
  if (from_native_stack != to_native_stack) {
    // (arm64 only) Mismatch in the use of stack pointers. One or the other
    // has to be restored manually by the code generator.
    flags |= to_native_stack ? CallDescriptor::kRestoreJSSP
                             : CallDescriptor::kRestoreCSP;
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode = kArchNop;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallAddress:
      // NOTE(review): for C calls the MiscField carries the parameter count
      // rather than |flags|, so the flags computed above are not encoded on
      // this opcode — confirm this is intentional for handler/stack flags.
      opcode =
          kArchCallCFunction |
          MiscField::encode(static_cast<int>(descriptor->ParameterCount()));
      break;
    case CallDescriptor::kCallCodeObject:
      opcode = kArchCallCodeObject | MiscField::encode(flags);
      break;
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction | MiscField::encode(flags);
      break;
  }

  // Emit the call instruction.
  size_t const output_count = buffer.outputs.size();
  auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
  Instruction* call_instr =
      Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
           &buffer.instruction_args.front());
  // Selection may have failed inside Emit; in that case there is no valid
  // instruction to mark.
  if (instruction_selection_failed()) return;
  call_instr->MarkAsCall();
}
1891
1892
VisitTailCall(Node * node)1893 void InstructionSelector::VisitTailCall(Node* node) {
1894 OperandGenerator g(this);
1895 CallDescriptor const* descriptor = CallDescriptorOf(node->op());
1896 DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
1897
1898 CallDescriptor* caller = linkage()->GetIncomingDescriptor();
1899 DCHECK(caller->CanTailCall(node));
1900 const CallDescriptor* callee = CallDescriptorOf(node->op());
1901 int stack_param_delta = callee->GetStackParameterDelta(caller);
1902 CallBuffer buffer(zone(), descriptor, nullptr);
1903
1904 // Compute InstructionOperands for inputs and outputs.
1905 CallBufferFlags flags(kCallCodeImmediate | kCallTail);
1906 if (IsTailCallAddressImmediate()) {
1907 flags |= kCallAddressImmediate;
1908 }
1909 InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
1910
1911 // Select the appropriate opcode based on the call type.
1912 InstructionCode opcode;
1913 InstructionOperandVector temps(zone());
1914 if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
1915 switch (descriptor->kind()) {
1916 case CallDescriptor::kCallCodeObject:
1917 opcode = kArchTailCallCodeObjectFromJSFunction;
1918 break;
1919 case CallDescriptor::kCallJSFunction:
1920 opcode = kArchTailCallJSFunctionFromJSFunction;
1921 break;
1922 default:
1923 UNREACHABLE();
1924 return;
1925 }
1926 int temps_count = GetTempsCountForTailCallFromJSFunction();
1927 for (int i = 0; i < temps_count; i++) {
1928 temps.push_back(g.TempRegister());
1929 }
1930 } else {
1931 switch (descriptor->kind()) {
1932 case CallDescriptor::kCallCodeObject:
1933 opcode = kArchTailCallCodeObject;
1934 break;
1935 case CallDescriptor::kCallAddress:
1936 opcode = kArchTailCallAddress;
1937 break;
1938 default:
1939 UNREACHABLE();
1940 return;
1941 }
1942 }
1943 opcode |= MiscField::encode(descriptor->flags());
1944
1945 Emit(kArchPrepareTailCall, g.NoOutput());
1946
1947 int first_unused_stack_slot =
1948 (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
1949 stack_param_delta;
1950 buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
1951
1952 // Emit the tailcall instruction.
1953 Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
1954 &buffer.instruction_args.front(), temps.size(),
1955 temps.empty() ? nullptr : &temps.front());
1956 }
1957
1958
VisitGoto(BasicBlock * target)1959 void InstructionSelector::VisitGoto(BasicBlock* target) {
1960 // jump to the next block.
1961 OperandGenerator g(this);
1962 Emit(kArchJmp, g.NoOutput(), g.Label(target));
1963 }
1964
// Lowers a Return node to kArchRet.  Input 0 is the pop count; inputs 1..n
// are the returned values, each placed at its linkage return location.
void InstructionSelector::VisitReturn(Node* ret) {
  OperandGenerator g(this);
  // Even with zero declared returns the pop-count input must be passed.
  const int input_count = linkage()->GetIncomingDescriptor()->ReturnCount() == 0
                              ? 1
                              : ret->op()->ValueInputCount();
  DCHECK_GE(input_count, 1);
  auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
  Node* pop_count = ret->InputAt(0);
  // A constant pop count can be encoded directly as an immediate.
  value_locations[0] = pop_count->opcode() == IrOpcode::kInt32Constant
                           ? g.UseImmediate(pop_count)
                           : g.UseRegister(pop_count);
  for (int i = 1; i < input_count; ++i) {
    // Return value i maps to linkage return location i - 1.
    value_locations[i] =
        g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i - 1));
  }
  Emit(kArchRet, 0, nullptr, input_count, value_locations);
}
1982
EmitDeoptimize(InstructionCode opcode,InstructionOperand output,InstructionOperand a,DeoptimizeReason reason,Node * frame_state)1983 Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
1984 InstructionOperand output,
1985 InstructionOperand a,
1986 DeoptimizeReason reason,
1987 Node* frame_state) {
1988 size_t output_count = output.IsInvalid() ? 0 : 1;
1989 InstructionOperand inputs[] = {a};
1990 size_t input_count = arraysize(inputs);
1991 return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
1992 reason, frame_state);
1993 }
1994
EmitDeoptimize(InstructionCode opcode,InstructionOperand output,InstructionOperand a,InstructionOperand b,DeoptimizeReason reason,Node * frame_state)1995 Instruction* InstructionSelector::EmitDeoptimize(
1996 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
1997 InstructionOperand b, DeoptimizeReason reason, Node* frame_state) {
1998 size_t output_count = output.IsInvalid() ? 0 : 1;
1999 InstructionOperand inputs[] = {a, b};
2000 size_t input_count = arraysize(inputs);
2001 return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
2002 reason, frame_state);
2003 }
2004
// Emits |opcode| as a deoptimizing instruction: the plain inputs are
// followed by a deoptimization-entry id immediate and then by the flattened
// frame state values.
Instruction* InstructionSelector::EmitDeoptimize(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, DeoptimizeReason reason,
    Node* frame_state) {
  OperandGenerator g(this);
  FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
  InstructionOperandVector args(instruction_zone());
  // Plain inputs + state id + every frame state value.
  args.reserve(input_count + 1 + descriptor->GetTotalSize());
  for (size_t i = 0; i < input_count; ++i) {
    args.push_back(inputs[i]);
  }
  // Record the plain input count in the MiscField — presumably so later
  // stages can find where the frame state inputs begin; verify at use sites.
  opcode |= MiscField::encode(static_cast<int>(input_count));
  int const state_id = sequence()->AddDeoptimizationEntry(descriptor, reason);
  args.push_back(g.TempImmediate(state_id));
  StateObjectDeduplicator deduplicator(instruction_zone());
  AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
                                  &args, FrameStateInputKind::kAny,
                                  instruction_zone());
  return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
              nullptr);
}
2026
EmitIdentity(Node * node)2027 void InstructionSelector::EmitIdentity(Node* node) {
2028 OperandGenerator g(this);
2029 MarkAsUsed(node->InputAt(0));
2030 SetRename(node, node->InputAt(0));
2031 }
2032
// Lowers an unconditional deoptimization.  |value| is the frame state
// passed through to EmitDeoptimize.
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
                                          DeoptimizeReason reason,
                                          Node* value) {
  // Encode the Deoptimizer bailout type matching the IR-level kind.
  InstructionCode opcode = kArchDeoptimize;
  switch (kind) {
    case DeoptimizeKind::kEager:
      opcode |= MiscField::encode(Deoptimizer::EAGER);
      break;
    case DeoptimizeKind::kSoft:
      opcode |= MiscField::encode(Deoptimizer::SOFT);
      break;
  }
  // No outputs and no plain inputs — only the frame state.
  EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, reason, value);
}
2047
2048
VisitThrow(Node * value)2049 void InstructionSelector::VisitThrow(Node* value) {
2050 OperandGenerator g(this);
2051 Emit(kArchThrowTerminator, g.NoOutput());
2052 }
2053
VisitDebugBreak(Node * node)2054 void InstructionSelector::VisitDebugBreak(Node* node) {
2055 OperandGenerator g(this);
2056 Emit(kArchDebugBreak, g.NoOutput());
2057 }
2058
VisitComment(Node * node)2059 void InstructionSelector::VisitComment(Node* node) {
2060 OperandGenerator g(this);
2061 InstructionOperand operand(g.UseImmediate(node));
2062 Emit(kArchComment, 0, nullptr, 1, &operand);
2063 }
2064
// Pointer addition is lowered to the word-sized integer add of the target
// architecture.
void InstructionSelector::VisitUnsafePointerAdd(Node* node) {
#if V8_TARGET_ARCH_64_BIT
  VisitInt64Add(node);
#else   // V8_TARGET_ARCH_64_BIT
  VisitInt32Add(node);
#endif  // V8_TARGET_ARCH_64_BIT
}
2072
VisitRetain(Node * node)2073 void InstructionSelector::VisitRetain(Node* node) {
2074 OperandGenerator g(this);
2075 Emit(kArchNop, g.NoOutput(), g.UseAny(node->InputAt(0)));
2076 }
2077
CanProduceSignalingNaN(Node * node)2078 bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
2079 // TODO(jarin) Improve the heuristic here.
2080 if (node->opcode() == IrOpcode::kFloat64Add ||
2081 node->opcode() == IrOpcode::kFloat64Sub ||
2082 node->opcode() == IrOpcode::kFloat64Mul) {
2083 return false;
2084 }
2085 return true;
2086 }
2087
GetFrameStateDescriptor(Node * state)2088 FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
2089 Node* state) {
2090 DCHECK(state->opcode() == IrOpcode::kFrameState);
2091 DCHECK_EQ(kFrameStateInputCount, state->InputCount());
2092 FrameStateInfo state_info = OpParameter<FrameStateInfo>(state);
2093
2094 int parameters = static_cast<int>(
2095 StateValuesAccess(state->InputAt(kFrameStateParametersInput)).size());
2096 int locals = static_cast<int>(
2097 StateValuesAccess(state->InputAt(kFrameStateLocalsInput)).size());
2098 int stack = static_cast<int>(
2099 StateValuesAccess(state->InputAt(kFrameStateStackInput)).size());
2100
2101 DCHECK_EQ(parameters, state_info.parameter_count());
2102 DCHECK_EQ(locals, state_info.local_count());
2103
2104 FrameStateDescriptor* outer_state = nullptr;
2105 Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
2106 if (outer_node->opcode() == IrOpcode::kFrameState) {
2107 outer_state = GetFrameStateDescriptor(outer_node);
2108 }
2109
2110 return new (instruction_zone()) FrameStateDescriptor(
2111 instruction_zone(), state_info.type(), state_info.bailout_id(),
2112 state_info.state_combine(), parameters, locals, stack,
2113 state_info.shared_info(), outer_state);
2114 }
2115
2116
2117 } // namespace compiler
2118 } // namespace internal
2119 } // namespace v8
2120