1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <algorithm>
6
7 #include "src/base/adapters.h"
8 #include "src/compiler/instruction-selector-impl.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties.h"
11
12 namespace v8 {
13 namespace internal {
14 namespace compiler {
15
16 // Adds X64-specific methods for generating operands.
17 class X64OperandGenerator final : public OperandGenerator {
18 public:
19 explicit X64OperandGenerator(InstructionSelector* selector)
20 : OperandGenerator(selector) {}
21
22 bool CanBeImmediate(Node* node) {
23 switch (node->opcode()) {
24 case IrOpcode::kInt32Constant:
25 case IrOpcode::kRelocatableInt32Constant:
26 return true;
27 case IrOpcode::kInt64Constant: {
28 const int64_t value = OpParameter<int64_t>(node);
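// A 64-bit constant is usable as an immediate only if it is the sign extension of a 32-bit value.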
29 return value == static_cast<int64_t>(static_cast<int32_t>(value));
30 }
31 case IrOpcode::kNumberConstant: {
32 const double value = OpParameter<double>(node);
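// Only the double +0.0 (an all-zero bit pattern) can be encoded as an immediate.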
33 return bit_cast<int64_t>(value) == 0;
34 }
35 default:
36 return false;
37 }
38 }
39
40 int32_t GetImmediateIntegerValue(Node* node) {
41 DCHECK(CanBeImmediate(node));
42 if (node->opcode() == IrOpcode::kInt32Constant) {
43 return OpParameter<int32_t>(node);
44 }
45 DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
46 return static_cast<int32_t>(OpParameter<int64_t>(node));
47 }
48
49 bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
50 int effect_level) {
51 if (input->opcode() != IrOpcode::kLoad ||
52 !selector()->CanCover(node, input)) {
53 return false;
54 }
55 if (effect_level != selector()->GetEffectLevel(input)) {
56 return false;
57 }
58 MachineRepresentation rep =
59 LoadRepresentationOf(input->op()).representation();
60 switch (opcode) {
61 case kX64Cmp:
62 case kX64Test:
63 return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
64 case kX64Cmp32:
65 case kX64Test32:
66 return rep == MachineRepresentation::kWord32;
67 case kX64Cmp16:
68 case kX64Test16:
69 return rep == MachineRepresentation::kWord16;
70 case kX64Cmp8:
71 case kX64Test8:
72 return rep == MachineRepresentation::kWord8;
73 default:
74 break;
75 }
76 return false;
77 }
78
79 AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
80 Node* base, Node* displacement,
81 DisplacementMode displacement_mode,
82 InstructionOperand inputs[],
83 size_t* input_count) {
84 AddressingMode mode = kMode_MRI;
85 if (base != nullptr) {
86 inputs[(*input_count)++] = UseRegister(base);
87 if (index != nullptr) {
88 DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
89 inputs[(*input_count)++] = UseRegister(index);
90 if (displacement != nullptr) {
91 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
92 ? UseNegatedImmediate(displacement)
93 : UseImmediate(displacement);
94 static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
95 kMode_MR4I, kMode_MR8I};
96 mode = kMRnI_modes[scale_exponent];
97 } else {
98 static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
99 kMode_MR4, kMode_MR8};
100 mode = kMRn_modes[scale_exponent];
101 }
102 } else {
103 if (displacement == nullptr) {
104 mode = kMode_MR;
105 } else {
106 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
107 ? UseNegatedImmediate(displacement)
108 : UseImmediate(displacement);
109 mode = kMode_MRI;
110 }
111 }
112 } else {
113 DCHECK_NOT_NULL(index);
114 DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
115 inputs[(*input_count)++] = UseRegister(index);
116 if (displacement != nullptr) {
117 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
118 ? UseNegatedImmediate(displacement)
119 : UseImmediate(displacement);
120 static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
121 kMode_M4I, kMode_M8I};
122 mode = kMnI_modes[scale_exponent];
123 } else {
124 static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
125 kMode_M4, kMode_M8};
126 mode = kMn_modes[scale_exponent];
127 if (mode == kMode_MR1) {
128 // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
129 inputs[(*input_count)++] = UseRegister(index);
130 }
131 }
132 }
133 return mode;
134 }
135
136 AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
137 InstructionOperand inputs[],
138 size_t* input_count) {
139 if (selector()->CanAddressRelativeToRootsRegister()) {
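// Fold an access to an external reference into an offset from the roots register, provided the delta fits in a 32-bit displacement.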
140 LoadMatcher<ExternalReferenceMatcher> m(operand);
141 if (m.index().HasValue() && m.object().HasValue()) {
142 Address const kRootsRegisterValue =
143 kRootRegisterBias +
144 reinterpret_cast<Address>(
145 selector()->isolate()->heap()->roots_array_start());
146 ptrdiff_t const delta =
147 m.index().Value() +
148 (m.object().Value().address() - kRootsRegisterValue);
149 if (is_int32(delta)) {
150 inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
151 return kMode_Root;
152 }
153 }
154 }
155 BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
156 DCHECK(m.matches());
157 if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
158 return GenerateMemoryOperandInputs(
159 m.index(), m.scale(), m.base(), m.displacement(),
160 m.displacement_mode(), inputs, input_count);
161 } else {
162 inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
163 inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
164 return kMode_MR1;
165 }
166 }
167
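// An operand that is no longer live after this use can be clobbered freely, which makes it the better candidate for a "same as first" input.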
168 bool CanBeBetterLeftOperand(Node* node) const {
169 return !selector()->IsLive(node);
170 }
171 };
172
173 namespace {
174
175 ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
176 ArchOpcode opcode = kArchNop;
177 switch (load_rep.representation()) {
178 case MachineRepresentation::kFloat32:
179 opcode = kX64Movss;
180 break;
181 case MachineRepresentation::kFloat64:
182 opcode = kX64Movsd;
183 break;
184 case MachineRepresentation::kBit: // Fall through.
185 case MachineRepresentation::kWord8:
186 opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
187 break;
188 case MachineRepresentation::kWord16:
189 opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
190 break;
191 case MachineRepresentation::kWord32:
192 opcode = kX64Movl;
193 break;
194 case MachineRepresentation::kTaggedSigned: // Fall through.
195 case MachineRepresentation::kTaggedPointer: // Fall through.
196 case MachineRepresentation::kTagged: // Fall through.
197 case MachineRepresentation::kWord64:
198 opcode = kX64Movq;
199 break;
200 case MachineRepresentation::kSimd128: // Fall through.
201 case MachineRepresentation::kNone:
202 UNREACHABLE();
203 break;
204 }
205 return opcode;
206 }
207
208 } // namespace
209
210 void InstructionSelector::VisitLoad(Node* node) {
211 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
212 X64OperandGenerator g(this);
213
214 ArchOpcode opcode = GetLoadOpcode(load_rep);
215 InstructionOperand outputs[1];
216 outputs[0] = g.DefineAsRegister(node);
217 InstructionOperand inputs[3];
218 size_t input_count = 0;
219 AddressingMode mode =
220 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
221 InstructionCode code = opcode | AddressingModeField::encode(mode);
222 Emit(code, 1, outputs, input_count, inputs);
223 }
224
225 void InstructionSelector::VisitProtectedLoad(Node* node) {
226 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
227 X64OperandGenerator g(this);
228
229 ArchOpcode opcode = GetLoadOpcode(load_rep);
230 InstructionOperand outputs[1];
231 outputs[0] = g.DefineAsRegister(node);
232 InstructionOperand inputs[4];
233 size_t input_count = 0;
234 AddressingMode mode =
235 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
236 // Add the context parameter as an input.
237 inputs[input_count++] = g.UseUniqueRegister(node->InputAt(2));
238 // Add the source position as an input
239 inputs[input_count++] = g.UseImmediate(node->InputAt(3));
240 InstructionCode code = opcode | AddressingModeField::encode(mode);
241 Emit(code, 1, outputs, input_count, inputs);
242 }
243
244 void InstructionSelector::VisitStore(Node* node) {
245 X64OperandGenerator g(this);
246 Node* base = node->InputAt(0);
247 Node* index = node->InputAt(1);
248 Node* value = node->InputAt(2);
249
250 StoreRepresentation store_rep = StoreRepresentationOf(node->op());
251 WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
252 MachineRepresentation rep = store_rep.representation();
253
254 if (write_barrier_kind != kNoWriteBarrier) {
255 DCHECK(CanBeTaggedPointer(rep));
256 AddressingMode addressing_mode;
257 InstructionOperand inputs[3];
258 size_t input_count = 0;
259 inputs[input_count++] = g.UseUniqueRegister(base);
260 if (g.CanBeImmediate(index)) {
261 inputs[input_count++] = g.UseImmediate(index);
262 addressing_mode = kMode_MRI;
263 } else {
264 inputs[input_count++] = g.UseUniqueRegister(index);
265 addressing_mode = kMode_MR1;
266 }
267 inputs[input_count++] = g.UseUniqueRegister(value);
268 RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
269 switch (write_barrier_kind) {
270 case kNoWriteBarrier:
271 UNREACHABLE();
272 break;
273 case kMapWriteBarrier:
274 record_write_mode = RecordWriteMode::kValueIsMap;
275 break;
276 case kPointerWriteBarrier:
277 record_write_mode = RecordWriteMode::kValueIsPointer;
278 break;
279 case kFullWriteBarrier:
280 record_write_mode = RecordWriteMode::kValueIsAny;
281 break;
282 }
283 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
284 size_t const temp_count = arraysize(temps);
285 InstructionCode code = kArchStoreWithWriteBarrier;
286 code |= AddressingModeField::encode(addressing_mode);
287 code |= MiscField::encode(static_cast<int>(record_write_mode));
288 Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
289 } else {
290 ArchOpcode opcode = kArchNop;
291 switch (rep) {
292 case MachineRepresentation::kFloat32:
293 opcode = kX64Movss;
294 break;
295 case MachineRepresentation::kFloat64:
296 opcode = kX64Movsd;
297 break;
298 case MachineRepresentation::kBit: // Fall through.
299 case MachineRepresentation::kWord8:
300 opcode = kX64Movb;
301 break;
302 case MachineRepresentation::kWord16:
303 opcode = kX64Movw;
304 break;
305 case MachineRepresentation::kWord32:
306 opcode = kX64Movl;
307 break;
308 case MachineRepresentation::kTaggedSigned: // Fall through.
309 case MachineRepresentation::kTaggedPointer: // Fall through.
310 case MachineRepresentation::kTagged: // Fall through.
311 case MachineRepresentation::kWord64:
312 opcode = kX64Movq;
313 break;
314 case MachineRepresentation::kSimd128: // Fall through.
315 case MachineRepresentation::kNone:
316 UNREACHABLE();
317 return;
318 }
319 InstructionOperand inputs[4];
320 size_t input_count = 0;
321 AddressingMode addressing_mode =
322 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
323 InstructionCode code =
324 opcode | AddressingModeField::encode(addressing_mode);
325 InstructionOperand value_operand =
326 g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
327 inputs[input_count++] = value_operand;
328 Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
329 inputs);
330 }
331 }
332
333 // Architecture supports unaligned access, therefore VisitLoad is used instead
334 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
335
336 // Architecture supports unaligned access, therefore VisitStore is used instead
337 void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
338
339 void InstructionSelector::VisitCheckedLoad(Node* node) {
340 CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
341 X64OperandGenerator g(this);
342 Node* const buffer = node->InputAt(0);
343 Node* const offset = node->InputAt(1);
344 Node* const length = node->InputAt(2);
345 ArchOpcode opcode = kArchNop;
346 switch (load_rep.representation()) {
347 case MachineRepresentation::kWord8:
348 opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
349 break;
350 case MachineRepresentation::kWord16:
351 opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
352 break;
353 case MachineRepresentation::kWord32:
354 opcode = kCheckedLoadWord32;
355 break;
356 case MachineRepresentation::kWord64:
357 opcode = kCheckedLoadWord64;
358 break;
359 case MachineRepresentation::kFloat32:
360 opcode = kCheckedLoadFloat32;
361 break;
362 case MachineRepresentation::kFloat64:
363 opcode = kCheckedLoadFloat64;
364 break;
365 case MachineRepresentation::kBit: // Fall through.
366 case MachineRepresentation::kSimd128: // Fall through.
367 case MachineRepresentation::kTaggedSigned: // Fall through.
368 case MachineRepresentation::kTaggedPointer: // Fall through.
369 case MachineRepresentation::kTagged: // Fall through.
370 case MachineRepresentation::kNone:
371 UNREACHABLE();
372 return;
373 }
374 if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
375 Int32Matcher mlength(length);
376 Int32BinopMatcher moffset(offset);
377 if (mlength.HasValue() && moffset.right().HasValue() &&
378 moffset.right().Value() >= 0 &&
379 mlength.Value() >= moffset.right().Value()) {
380 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
381 g.UseRegister(moffset.left().node()),
382 g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
383 return;
384 }
385 }
386 InstructionOperand length_operand =
387 g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
388 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
389 g.UseRegister(offset), g.TempImmediate(0), length_operand);
390 }
391
392
393 void InstructionSelector::VisitCheckedStore(Node* node) {
394 MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
395 X64OperandGenerator g(this);
396 Node* const buffer = node->InputAt(0);
397 Node* const offset = node->InputAt(1);
398 Node* const length = node->InputAt(2);
399 Node* const value = node->InputAt(3);
400 ArchOpcode opcode = kArchNop;
401 switch (rep) {
402 case MachineRepresentation::kWord8:
403 opcode = kCheckedStoreWord8;
404 break;
405 case MachineRepresentation::kWord16:
406 opcode = kCheckedStoreWord16;
407 break;
408 case MachineRepresentation::kWord32:
409 opcode = kCheckedStoreWord32;
410 break;
411 case MachineRepresentation::kWord64:
412 opcode = kCheckedStoreWord64;
413 break;
414 case MachineRepresentation::kFloat32:
415 opcode = kCheckedStoreFloat32;
416 break;
417 case MachineRepresentation::kFloat64:
418 opcode = kCheckedStoreFloat64;
419 break;
420 case MachineRepresentation::kBit: // Fall through.
421 case MachineRepresentation::kSimd128: // Fall through.
422 case MachineRepresentation::kTaggedSigned: // Fall through.
423 case MachineRepresentation::kTaggedPointer: // Fall through.
424 case MachineRepresentation::kTagged: // Fall through.
425 case MachineRepresentation::kNone:
426 UNREACHABLE();
427 return;
428 }
429 InstructionOperand value_operand =
430 g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
431 if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
432 Int32Matcher mlength(length);
433 Int32BinopMatcher moffset(offset);
434 if (mlength.HasValue() && moffset.right().HasValue() &&
435 moffset.right().Value() >= 0 &&
436 mlength.Value() >= moffset.right().Value()) {
437 Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
438 g.UseRegister(moffset.left().node()),
439 g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
440 value_operand);
441 return;
442 }
443 }
444 InstructionOperand length_operand =
445 g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
446 Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
447 g.TempImmediate(0), length_operand, value_operand);
448 }
449
450
451 // Shared routine for multiple binary operations.
452 static void VisitBinop(InstructionSelector* selector, Node* node,
453 InstructionCode opcode, FlagsContinuation* cont) {
454 X64OperandGenerator g(selector);
455 Int32BinopMatcher m(node);
456 Node* left = m.left().node();
457 Node* right = m.right().node();
458 InstructionOperand inputs[4];
459 size_t input_count = 0;
460 InstructionOperand outputs[2];
461 size_t output_count = 0;
462
463 // TODO(turbofan): match complex addressing modes.
464 if (left == right) {
465 // If both inputs refer to the same operand, enforce allocating a register
466 // for both of them to ensure that we don't end up generating code like
467 // this:
468 //
469 // mov rax, [rbp-0x10]
470 // add rax, [rbp-0x10]
471 // jo label
472 InstructionOperand const input = g.UseRegister(left);
473 inputs[input_count++] = input;
474 inputs[input_count++] = input;
475 } else if (g.CanBeImmediate(right)) {
476 inputs[input_count++] = g.UseRegister(left);
477 inputs[input_count++] = g.UseImmediate(right);
478 } else {
479 if (node->op()->HasProperty(Operator::kCommutative) &&
480 g.CanBeBetterLeftOperand(right)) {
481 std::swap(left, right);
482 }
483 inputs[input_count++] = g.UseRegister(left);
484 inputs[input_count++] = g.Use(right);
485 }
486
487 if (cont->IsBranch()) {
488 inputs[input_count++] = g.Label(cont->true_block());
489 inputs[input_count++] = g.Label(cont->false_block());
490 }
491
492 outputs[output_count++] = g.DefineSameAsFirst(node);
493 if (cont->IsSet()) {
494 outputs[output_count++] = g.DefineAsRegister(cont->result());
495 }
496
497 DCHECK_NE(0u, input_count);
498 DCHECK_NE(0u, output_count);
499 DCHECK_GE(arraysize(inputs), input_count);
500 DCHECK_GE(arraysize(outputs), output_count);
501
502 opcode = cont->Encode(opcode);
503 if (cont->IsDeoptimize()) {
504 selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
505 cont->reason(), cont->frame_state());
506 } else {
507 selector->Emit(opcode, output_count, outputs, input_count, inputs);
508 }
509 }
510
511
512 // Shared routine for multiple binary operations.
513 static void VisitBinop(InstructionSelector* selector, Node* node,
514 InstructionCode opcode) {
515 FlagsContinuation cont;
516 VisitBinop(selector, node, opcode, &cont);
517 }
518
519
520 void InstructionSelector::VisitWord32And(Node* node) {
521 X64OperandGenerator g(this);
522 Uint32BinopMatcher m(node);
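// An AND with 0xFF or 0xFFFF is equivalent to a zero-extending 8- or 16-bit move.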
523 if (m.right().Is(0xff)) {
524 Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
525 } else if (m.right().Is(0xffff)) {
526 Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
527 } else {
528 VisitBinop(this, node, kX64And32);
529 }
530 }
531
532
533 void InstructionSelector::VisitWord64And(Node* node) {
534 VisitBinop(this, node, kX64And);
535 }
536
537
538 void InstructionSelector::VisitWord32Or(Node* node) {
539 VisitBinop(this, node, kX64Or32);
540 }
541
542
543 void InstructionSelector::VisitWord64Or(Node* node) {
544 VisitBinop(this, node, kX64Or);
545 }
546
547
548 void InstructionSelector::VisitWord32Xor(Node* node) {
549 X64OperandGenerator g(this);
550 Uint32BinopMatcher m(node);
551 if (m.right().Is(-1)) {
552 Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
553 } else {
554 VisitBinop(this, node, kX64Xor32);
555 }
556 }
557
558
559 void InstructionSelector::VisitWord64Xor(Node* node) {
560 X64OperandGenerator g(this);
561 Uint64BinopMatcher m(node);
562 if (m.right().Is(-1)) {
563 Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
564 } else {
565 VisitBinop(this, node, kX64Xor);
566 }
567 }
568
569
570 namespace {
571
572 // Shared routine for multiple 32-bit shift operations.
573 // TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
574 void VisitWord32Shift(InstructionSelector* selector, Node* node,
575 ArchOpcode opcode) {
576 X64OperandGenerator g(selector);
577 Int32BinopMatcher m(node);
578 Node* left = m.left().node();
579 Node* right = m.right().node();
580
581 if (g.CanBeImmediate(right)) {
582 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
583 g.UseImmediate(right));
584 } else {
585 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
586 g.UseFixed(right, rcx));
587 }
588 }
589
590
591 // Shared routine for multiple 64-bit shift operations.
592 // TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
593 void VisitWord64Shift(InstructionSelector* selector, Node* node,
594 ArchOpcode opcode) {
595 X64OperandGenerator g(selector);
596 Int64BinopMatcher m(node);
597 Node* left = m.left().node();
598 Node* right = m.right().node();
599
600 if (g.CanBeImmediate(right)) {
601 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
602 g.UseImmediate(right));
603 } else {
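// 64-bit shifts on x64 use only the low six bits of the shift count, so an explicit Word64And of the count with 0x3F is redundant and can be dropped.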
604 if (m.right().IsWord64And()) {
605 Int64BinopMatcher mright(right);
606 if (mright.right().Is(0x3F)) {
607 right = mright.left().node();
608 }
609 }
610 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
611 g.UseFixed(right, rcx));
612 }
613 }
614
615 void EmitLea(InstructionSelector* selector, InstructionCode opcode,
616 Node* result, Node* index, int scale, Node* base,
617 Node* displacement, DisplacementMode displacement_mode) {
618 X64OperandGenerator g(selector);
619
620 InstructionOperand inputs[4];
621 size_t input_count = 0;
622 AddressingMode mode =
623 g.GenerateMemoryOperandInputs(index, scale, base, displacement,
624 displacement_mode, inputs, &input_count);
625
626 DCHECK_NE(0u, input_count);
627 DCHECK_GE(arraysize(inputs), input_count);
628
629 InstructionOperand outputs[1];
630 outputs[0] = g.DefineAsRegister(result);
631
632 opcode = AddressingModeField::encode(mode) | opcode;
633
634 selector->Emit(opcode, 1, outputs, input_count, inputs);
635 }
636
637 } // namespace
638
639
640 void InstructionSelector::VisitWord32Shl(Node* node) {
641 Int32ScaleMatcher m(node, true);
642 if (m.matches()) {
643 Node* index = node->InputAt(0);
644 Node* base = m.power_of_two_plus_one() ? index : nullptr;
645 EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
646 kPositiveDisplacement);
647 return;
648 }
649 VisitWord32Shift(this, node, kX64Shl32);
650 }
651
652
653 void InstructionSelector::VisitWord64Shl(Node* node) {
654 X64OperandGenerator g(this);
655 Int64ScaleMatcher m(node, true);
656 if (m.matches()) {
657 Node* index = node->InputAt(0);
658 Node* base = m.power_of_two_plus_one() ? index : nullptr;
659 EmitLea(this, kX64Lea, node, index, m.scale(), base, nullptr,
660 kPositiveDisplacement);
661 return;
662 } else {
663 Int64BinopMatcher m(node);
664 if ((m.left().IsChangeInt32ToInt64() ||
665 m.left().IsChangeUint32ToUint64()) &&
666 m.right().IsInRange(32, 63)) {
667 // There's no need to sign/zero-extend to 64-bit if we shift out the upper
668 // 32 bits anyway.
669 Emit(kX64Shl, g.DefineSameAsFirst(node),
670 g.UseRegister(m.left().node()->InputAt(0)),
671 g.UseImmediate(m.right().node()));
672 return;
673 }
674 }
675 VisitWord64Shift(this, node, kX64Shl);
676 }
677
678
679 void InstructionSelector::VisitWord32Shr(Node* node) {
680 VisitWord32Shift(this, node, kX64Shr32);
681 }
682
683 namespace {
684 bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
685 InstructionCode opcode) {
686 DCHECK(IrOpcode::kWord64Sar == node->opcode() ||
687 IrOpcode::kWord64Shr == node->opcode());
688 X64OperandGenerator g(selector);
689 Int64BinopMatcher m(node);
690 if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
691 m.right().Is(32)) {
692 // Just load and sign-extend the interesting 4 bytes instead. This happens,
693 // for example, when we're loading and untagging SMIs.
694 BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
695 AddressOption::kAllowAll);
696 if (mleft.matches() && (mleft.displacement() == nullptr ||
697 g.CanBeImmediate(mleft.displacement()))) {
698 size_t input_count = 0;
699 InstructionOperand inputs[3];
700 AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
701 m.left().node(), inputs, &input_count);
702 if (mleft.displacement() == nullptr) {
703 // Make sure that the addressing mode indicates the presence of an
704 // immediate displacement. It seems that we never use M1 and M2, but we
705 // handle them here anyways.
706 switch (mode) {
707 case kMode_MR:
708 mode = kMode_MRI;
709 break;
710 case kMode_MR1:
711 mode = kMode_MR1I;
712 break;
713 case kMode_MR2:
714 mode = kMode_MR2I;
715 break;
716 case kMode_MR4:
717 mode = kMode_MR4I;
718 break;
719 case kMode_MR8:
720 mode = kMode_MR8I;
721 break;
722 case kMode_M1:
723 mode = kMode_M1I;
724 break;
725 case kMode_M2:
726 mode = kMode_M2I;
727 break;
728 case kMode_M4:
729 mode = kMode_M4I;
730 break;
731 case kMode_M8:
732 mode = kMode_M8I;
733 break;
734 case kMode_None:
735 case kMode_MRI:
736 case kMode_MR1I:
737 case kMode_MR2I:
738 case kMode_MR4I:
739 case kMode_MR8I:
740 case kMode_M1I:
741 case kMode_M2I:
742 case kMode_M4I:
743 case kMode_M8I:
744 case kMode_Root:
745 UNREACHABLE();
746 }
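// x64 is little-endian, so the upper 32 bits of the 64-bit value live at displacement +4.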
747 inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
748 } else {
749 int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
750 inputs[input_count - 1] =
751 ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
752 }
753 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
754 InstructionCode code = opcode | AddressingModeField::encode(mode);
755 selector->Emit(code, 1, outputs, input_count, inputs);
756 return true;
757 }
758 }
759 return false;
760 }
761 } // namespace
762
763 void InstructionSelector::VisitWord64Shr(Node* node) {
764 if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movl)) return;
765 VisitWord64Shift(this, node, kX64Shr);
766 }
767
768 void InstructionSelector::VisitWord32Sar(Node* node) {
769 X64OperandGenerator g(this);
770 Int32BinopMatcher m(node);
771 if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
772 Int32BinopMatcher mleft(m.left().node());
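// (x << 16) >> 16 sign-extends the low 16 bits and (x << 24) >> 24 sign-extends the low 8 bits, so emit movsxwl/movsxbl directly.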
773 if (mleft.right().Is(16) && m.right().Is(16)) {
774 Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
775 return;
776 } else if (mleft.right().Is(24) && m.right().Is(24)) {
777 Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
778 return;
779 }
780 }
781 VisitWord32Shift(this, node, kX64Sar32);
782 }
783
784 void InstructionSelector::VisitWord64Sar(Node* node) {
785 if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return;
786 VisitWord64Shift(this, node, kX64Sar);
787 }
788
789
790 void InstructionSelector::VisitWord32Ror(Node* node) {
791 VisitWord32Shift(this, node, kX64Ror32);
792 }
793
794
795 void InstructionSelector::VisitWord64Ror(Node* node) {
796 VisitWord64Shift(this, node, kX64Ror);
797 }
798
799
800 void InstructionSelector::VisitWord64Clz(Node* node) {
801 X64OperandGenerator g(this);
802 Emit(kX64Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
803 }
804
805
806 void InstructionSelector::VisitWord32Clz(Node* node) {
807 X64OperandGenerator g(this);
808 Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
809 }
810
811
812 void InstructionSelector::VisitWord64Ctz(Node* node) {
813 X64OperandGenerator g(this);
814 Emit(kX64Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
815 }
816
817
818 void InstructionSelector::VisitWord32Ctz(Node* node) {
819 X64OperandGenerator g(this);
820 Emit(kX64Tzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
821 }
822
823
824 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
825
826
827 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
828
829 void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
830
831 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
832
833 void InstructionSelector::VisitWord32Popcnt(Node* node) {
834 X64OperandGenerator g(this);
835 Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
836 }
837
838
839 void InstructionSelector::VisitWord64Popcnt(Node* node) {
840 X64OperandGenerator g(this);
841 Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
842 }
843
844
845 void InstructionSelector::VisitInt32Add(Node* node) {
846 X64OperandGenerator g(this);
847
848 // Try to match the Add to a leal pattern
849 BaseWithIndexAndDisplacement32Matcher m(node);
850 if (m.matches() &&
851 (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
852 EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
853 m.displacement(), m.displacement_mode());
854 return;
855 }
856
857 // No leal pattern match, use addl
858 VisitBinop(this, node, kX64Add32);
859 }
860
861
862 void InstructionSelector::VisitInt64Add(Node* node) {
863 X64OperandGenerator g(this);
864
865 // Try to match the Add to a leaq pattern
866 BaseWithIndexAndDisplacement64Matcher m(node);
867 if (m.matches() &&
868 (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
869 EmitLea(this, kX64Lea, node, m.index(), m.scale(), m.base(),
870 m.displacement(), m.displacement_mode());
871 return;
872 }
873
874 // No leaq pattern match, use addq
875 VisitBinop(this, node, kX64Add);
876 }
877
878
879 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
880 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
881 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
882 return VisitBinop(this, node, kX64Add, &cont);
883 }
884 FlagsContinuation cont;
885 VisitBinop(this, node, kX64Add, &cont);
886 }
887
888
889 void InstructionSelector::VisitInt32Sub(Node* node) {
890 X64OperandGenerator g(this);
891 Int32BinopMatcher m(node);
892 if (m.left().Is(0)) {
893 Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
894 } else {
895 if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
896 // Turn subtractions of constant values into immediate "leal" instructions
897 // by negating the value.
898 Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
899 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
900 g.TempImmediate(-m.right().Value()));
901 return;
902 }
903 VisitBinop(this, node, kX64Sub32);
904 }
905 }
906
907
908 void InstructionSelector::VisitInt64Sub(Node* node) {
909 X64OperandGenerator g(this);
910 Int64BinopMatcher m(node);
911 if (m.left().Is(0)) {
912 Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
913 } else {
914 if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
915 // Turn subtractions of constant values into immediate "leaq" instructions
916 // by negating the value.
917 Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
918 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
919 g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
920 return;
921 }
922 VisitBinop(this, node, kX64Sub);
923 }
924 }
925
926
927 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
928 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
929 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
930 return VisitBinop(this, node, kX64Sub, &cont);
931 }
932 FlagsContinuation cont;
933 VisitBinop(this, node, kX64Sub, &cont);
934 }
935
936
937 namespace {
938
939 void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
940 X64OperandGenerator g(selector);
941 Int32BinopMatcher m(node);
942 Node* left = m.left().node();
943 Node* right = m.right().node();
944 if (g.CanBeImmediate(right)) {
945 selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
946 g.UseImmediate(right));
947 } else {
948 if (g.CanBeBetterLeftOperand(right)) {
949 std::swap(left, right);
950 }
951 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
952 g.Use(right));
953 }
954 }
955
956 void VisitMulHigh(InstructionSelector* selector, Node* node,
957 ArchOpcode opcode) {
958 X64OperandGenerator g(selector);
959 Node* left = node->InputAt(0);
960 Node* right = node->InputAt(1);
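// If only the right operand dies at this node, swap so that the dead value is the one pinned to rax.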
961 if (selector->IsLive(left) && !selector->IsLive(right)) {
962 std::swap(left, right);
963 }
964 InstructionOperand temps[] = {g.TempRegister(rax)};
965 // TODO(turbofan): We use UseUniqueRegister here to improve register
966 // allocation.
967 selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
968 g.UseUniqueRegister(right), arraysize(temps), temps);
969 }
970
971
972 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
973 X64OperandGenerator g(selector);
974 InstructionOperand temps[] = {g.TempRegister(rdx)};
975 selector->Emit(
976 opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
977 g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
978 }
979
980
981 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
982 X64OperandGenerator g(selector);
983 InstructionOperand temps[] = {g.TempRegister(rax)};
984 selector->Emit(
985 opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
986 g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
987 }
988
989 } // namespace
990
991
992 void InstructionSelector::VisitInt32Mul(Node* node) {
993 Int32ScaleMatcher m(node, true);
994 if (m.matches()) {
995 Node* index = node->InputAt(0);
996 Node* base = m.power_of_two_plus_one() ? index : nullptr;
997 EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
998 kPositiveDisplacement);
999 return;
1000 }
1001 VisitMul(this, node, kX64Imul32);
1002 }
1003
1004 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
1005 // TODO(mvstanton): Use Int32ScaleMatcher somehow.
1006 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1007 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1008 return VisitBinop(this, node, kX64Imul32, &cont);
1009 }
1010 FlagsContinuation cont;
1011 VisitBinop(this, node, kX64Imul32, &cont);
1012 }
1013
1014 void InstructionSelector::VisitInt64Mul(Node* node) {
1015 VisitMul(this, node, kX64Imul);
1016 }
1017
1018 void InstructionSelector::VisitInt32MulHigh(Node* node) {
1019 VisitMulHigh(this, node, kX64ImulHigh32);
1020 }
1021
1022
1023 void InstructionSelector::VisitInt32Div(Node* node) {
1024 VisitDiv(this, node, kX64Idiv32);
1025 }
1026
1027
1028 void InstructionSelector::VisitInt64Div(Node* node) {
1029 VisitDiv(this, node, kX64Idiv);
1030 }
1031
1032
1033 void InstructionSelector::VisitUint32Div(Node* node) {
1034 VisitDiv(this, node, kX64Udiv32);
1035 }
1036
1037
1038 void InstructionSelector::VisitUint64Div(Node* node) {
1039 VisitDiv(this, node, kX64Udiv);
1040 }
1041
1042
1043 void InstructionSelector::VisitInt32Mod(Node* node) {
1044 VisitMod(this, node, kX64Idiv32);
1045 }
1046
1047
1048 void InstructionSelector::VisitInt64Mod(Node* node) {
1049 VisitMod(this, node, kX64Idiv);
1050 }
1051
1052
1053 void InstructionSelector::VisitUint32Mod(Node* node) {
1054 VisitMod(this, node, kX64Udiv32);
1055 }
1056
1057
1058 void InstructionSelector::VisitUint64Mod(Node* node) {
1059 VisitMod(this, node, kX64Udiv);
1060 }
1061
1062
1063 void InstructionSelector::VisitUint32MulHigh(Node* node) {
1064 VisitMulHigh(this, node, kX64UmulHigh32);
1065 }
1066
1067
1068 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
1069 X64OperandGenerator g(this);
1070 Emit(kSSEFloat32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1071 }
1072
1073
1074 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
1075 X64OperandGenerator g(this);
1076 Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1077 }
1078
1079
1080 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
1081 X64OperandGenerator g(this);
1082 Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1083 }
1084
1085
1086 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
1087 X64OperandGenerator g(this);
1088 Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1089 }
1090
1091
1092 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
1093 X64OperandGenerator g(this);
1094 Emit(kSSEFloat64ToUint32 | MiscField::encode(1), g.DefineAsRegister(node),
1095 g.Use(node->InputAt(0)));
1096 }
1097
1098 void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
1099 X64OperandGenerator g(this);
1100 Emit(kSSEFloat64ToUint32 | MiscField::encode(0), g.DefineAsRegister(node),
1101 g.Use(node->InputAt(0)));
1102 }
1103
1104 void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
1105 X64OperandGenerator g(this);
1106 Emit(kSSEFloat32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1107 }
1108
1109
1110 void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
1111 X64OperandGenerator g(this);
1112 Emit(kSSEFloat32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1113 }
1114
1115
1116 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1117 X64OperandGenerator g(this);
1118 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1119 InstructionOperand outputs[2];
1120 size_t output_count = 0;
1121 outputs[output_count++] = g.DefineAsRegister(node);
1122
1123 Node* success_output = NodeProperties::FindProjection(node, 1);
1124 if (success_output) {
1125 outputs[output_count++] = g.DefineAsRegister(success_output);
1126 }
1127
1128 Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
1129 }
1130
1131
1132 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1133 X64OperandGenerator g(this);
1134 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1135 InstructionOperand outputs[2];
1136 size_t output_count = 0;
1137 outputs[output_count++] = g.DefineAsRegister(node);
1138
1139 Node* success_output = NodeProperties::FindProjection(node, 1);
1140 if (success_output) {
1141 outputs[output_count++] = g.DefineAsRegister(success_output);
1142 }
1143
1144 Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
1145 }
1146
1147
1148 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1149 X64OperandGenerator g(this);
1150 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1151 InstructionOperand outputs[2];
1152 size_t output_count = 0;
1153 outputs[output_count++] = g.DefineAsRegister(node);
1154
1155 Node* success_output = NodeProperties::FindProjection(node, 1);
1156 if (success_output) {
1157 outputs[output_count++] = g.DefineAsRegister(success_output);
1158 }
1159
1160 Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
1161 }
1162
1163
1164 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1165 X64OperandGenerator g(this);
1166 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1167 InstructionOperand outputs[2];
1168 size_t output_count = 0;
1169 outputs[output_count++] = g.DefineAsRegister(node);
1170
1171 Node* success_output = NodeProperties::FindProjection(node, 1);
1172 if (success_output) {
1173 outputs[output_count++] = g.DefineAsRegister(success_output);
1174 }
1175
1176 Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
1177 }
1178
1179
1180 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
1181 X64OperandGenerator g(this);
1182 Node* const value = node->InputAt(0);
1183 if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
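// Combine the conversion with the load by emitting a sign- or zero-extending load directly.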
1184 LoadRepresentation load_rep = LoadRepresentationOf(value->op());
1185 MachineRepresentation rep = load_rep.representation();
1186 InstructionCode opcode = kArchNop;
1187 switch (rep) {
1188 case MachineRepresentation::kBit: // Fall through.
1189 case MachineRepresentation::kWord8:
1190 opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq;
1191 break;
1192 case MachineRepresentation::kWord16:
1193 opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
1194 break;
1195 case MachineRepresentation::kWord32:
1196 opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl;
1197 break;
1198 default:
1199 UNREACHABLE();
1200 return;
1201 }
1202 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
1203 size_t input_count = 0;
1204 InstructionOperand inputs[3];
1205 AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
1206 node->InputAt(0), inputs, &input_count);
1207 opcode |= AddressingModeField::encode(mode);
1208 Emit(opcode, 1, outputs, input_count, inputs);
1209 } else {
1210 Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1211 }
1212 }
1213
1214 namespace {
1215
1216 bool ZeroExtendsWord32ToWord64(Node* node) {
1217 switch (node->opcode()) {
1218 case IrOpcode::kWord32And:
1219 case IrOpcode::kWord32Or:
1220 case IrOpcode::kWord32Xor:
1221 case IrOpcode::kWord32Shl:
1222 case IrOpcode::kWord32Shr:
1223 case IrOpcode::kWord32Sar:
1224 case IrOpcode::kWord32Ror:
1225 case IrOpcode::kWord32Equal:
1226 case IrOpcode::kInt32Add:
1227 case IrOpcode::kInt32Sub:
1228 case IrOpcode::kInt32Mul:
1229 case IrOpcode::kInt32MulHigh:
1230 case IrOpcode::kInt32Div:
1231 case IrOpcode::kInt32LessThan:
1232 case IrOpcode::kInt32LessThanOrEqual:
1233 case IrOpcode::kInt32Mod:
1234 case IrOpcode::kUint32Div:
1235 case IrOpcode::kUint32LessThan:
1236 case IrOpcode::kUint32LessThanOrEqual:
1237 case IrOpcode::kUint32Mod:
1238 case IrOpcode::kUint32MulHigh:
1239 // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
1240 // zero-extension is a no-op.
1241 return true;
1242 case IrOpcode::kProjection: {
1243 Node* const value = node->InputAt(0);
1244 switch (value->opcode()) {
1245 case IrOpcode::kInt32AddWithOverflow:
1246 case IrOpcode::kInt32SubWithOverflow:
1247 case IrOpcode::kInt32MulWithOverflow:
1248 return true;
1249 default:
1250 return false;
1251 }
1252 }
1253 case IrOpcode::kLoad: {
1254 // The movzxbl/movsxbl/movzxwl/movsxwl operations implicitly zero-extend
1255 // to 64-bit on x64,
1256 // so the zero-extension is a no-op.
1257 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1258 switch (load_rep.representation()) {
1259 case MachineRepresentation::kWord8:
1260 case MachineRepresentation::kWord16:
1261 return true;
1262 default:
1263 return false;
1264 }
1265 }
1266 default:
1267 return false;
1268 }
1269 }
1270
1271 } // namespace
1272
1273 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
1274 X64OperandGenerator g(this);
1275 Node* value = node->InputAt(0);
1276 if (ZeroExtendsWord32ToWord64(value)) {
1277 // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
1278 // zero-extension is a no-op.
1279 return EmitIdentity(node);
1280 }
1281 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
1282 }
1283
1284
1285 namespace {
1286
1287 void VisitRO(InstructionSelector* selector, Node* node,
1288 InstructionCode opcode) {
1289 X64OperandGenerator g(selector);
1290 selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1291 }
1292
1293
1294 void VisitRR(InstructionSelector* selector, Node* node,
1295 InstructionCode opcode) {
1296 X64OperandGenerator g(selector);
1297 selector->Emit(opcode, g.DefineAsRegister(node),
1298 g.UseRegister(node->InputAt(0)));
1299 }
1300
1301 void VisitRRO(InstructionSelector* selector, Node* node,
1302 InstructionCode opcode) {
1303 X64OperandGenerator g(selector);
1304 selector->Emit(opcode, g.DefineSameAsFirst(node),
1305 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
1306 }
1307
1308 void VisitFloatBinop(InstructionSelector* selector, Node* node,
1309 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
1310 X64OperandGenerator g(selector);
1311 InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
1312 InstructionOperand operand1 = g.Use(node->InputAt(1));
1313 if (selector->IsSupported(AVX)) {
1314 selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
1315 } else {
1316 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
1317 }
1318 }
1319
1320
1321 void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
1322 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
1323 X64OperandGenerator g(selector);
1324 if (selector->IsSupported(AVX)) {
1325 selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
1326 } else {
1327 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
1328 }
1329 }
1330
1331 } // namespace
1332
1333
1334 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
1335 VisitRO(this, node, kSSEFloat64ToFloat32);
1336 }
1337
1338 void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
1339 VisitRR(this, node, kArchTruncateDoubleToI);
1340 }
1341
1342
1343 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
1344 X64OperandGenerator g(this);
1345 Node* value = node->InputAt(0);
1346 if (CanCover(node, value)) {
1347 switch (value->opcode()) {
1348 case IrOpcode::kWord64Sar:
1349 case IrOpcode::kWord64Shr: {
1350 Int64BinopMatcher m(value);
1351 if (m.right().Is(32)) {
1352 if (TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
1353 return EmitIdentity(node);
1354 }
1355 Emit(kX64Shr, g.DefineSameAsFirst(node),
1356 g.UseRegister(m.left().node()), g.TempImmediate(32));
1357 return;
1358 }
1359 break;
1360 }
1361 default:
1362 break;
1363 }
1364 }
1365 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
1366 }
1367
1368 void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
1369 VisitRO(this, node, kSSEFloat64ToInt32);
1370 }
1371
1372 void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
1373 X64OperandGenerator g(this);
1374 Emit(kSSEInt32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1375 }
1376
1377
1378 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
1379 X64OperandGenerator g(this);
1380 Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1381 }
1382
1383
1384 void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
1385 X64OperandGenerator g(this);
1386 Emit(kSSEInt64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1387 }
1388
1389
1390 void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
1391 X64OperandGenerator g(this);
1392 Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1393 }
1394
1395
1396 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
1397 X64OperandGenerator g(this);
1398 InstructionOperand temps[] = {g.TempRegister()};
1399 Emit(kSSEUint64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
1400 arraysize(temps), temps);
1401 }
1402
1403
1404 void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
1405 X64OperandGenerator g(this);
1406 InstructionOperand temps[] = {g.TempRegister()};
1407 Emit(kSSEUint64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
1408 arraysize(temps), temps);
1409 }
1410
1411
1412 void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
1413 X64OperandGenerator g(this);
1414 Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1415 }
1416
1417
1418 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
1419 X64OperandGenerator g(this);
1420 Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1421 }
1422
1423
1424 void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
1425 X64OperandGenerator g(this);
1426 Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1427 }
1428
1429
1430 void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
1431 X64OperandGenerator g(this);
1432 Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1433 }
1434
1435
1436 void InstructionSelector::VisitFloat32Add(Node* node) {
1437 VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
1438 }
1439
1440
1441 void InstructionSelector::VisitFloat32Sub(Node* node) {
1442 VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
1443 }
1444
1445 void InstructionSelector::VisitFloat32Mul(Node* node) {
1446 VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
1447 }
1448
1449
1450 void InstructionSelector::VisitFloat32Div(Node* node) {
1451 VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
1452 }
1453
1454
1455 void InstructionSelector::VisitFloat32Abs(Node* node) {
1456 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
1457 }
1458
1459
1460 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
1461 VisitRO(this, node, kSSEFloat32Sqrt);
1462 }
1463
1464 void InstructionSelector::VisitFloat32Max(Node* node) {
1465 VisitRRO(this, node, kSSEFloat32Max);
1466 }
1467
1468 void InstructionSelector::VisitFloat32Min(Node* node) {
1469 VisitRRO(this, node, kSSEFloat32Min);
1470 }
1471
1472 void InstructionSelector::VisitFloat64Add(Node* node) {
1473 VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
1474 }
1475
1476
1477 void InstructionSelector::VisitFloat64Sub(Node* node) {
1478 VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
1479 }
1480
1481 void InstructionSelector::VisitFloat64Mul(Node* node) {
1482 VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
1483 }
1484
1485
1486 void InstructionSelector::VisitFloat64Div(Node* node) {
1487 VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
1488 }
1489
1490
1491 void InstructionSelector::VisitFloat64Mod(Node* node) {
1492 X64OperandGenerator g(this);
1493 InstructionOperand temps[] = {g.TempRegister(rax)};
1494 Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
1495 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
1496 temps);
1497 }
1498
1499
1500 void InstructionSelector::VisitFloat64Max(Node* node) {
1501 VisitRRO(this, node, kSSEFloat64Max);
1502 }
1503
1504
1505 void InstructionSelector::VisitFloat64Min(Node* node) {
1506 VisitRRO(this, node, kSSEFloat64Min);
1507 }
1508
1509
1510 void InstructionSelector::VisitFloat64Abs(Node* node) {
1511 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
1512 }
1513
1514 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
1515 VisitRO(this, node, kSSEFloat64Sqrt);
1516 }
1517
1518
1519 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
1520 VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
1521 }
1522
1523
1524 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
1525 VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
1526 }
1527
1528
1529 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
1530 VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
1531 }
1532
1533
1534 void InstructionSelector::VisitFloat64RoundUp(Node* node) {
1535 VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
1536 }
1537
1538
1539 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
1540 VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
1541 }
1542
1543
1544 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
1545 VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
1546 }
1547
1548
1549 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
1550 UNREACHABLE();
1551 }
1552
1553
1554 void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
1555 VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
1556 }
1557
1558
1559 void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
1560 VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
1561 }
1562
1563 void InstructionSelector::VisitFloat32Neg(Node* node) {
1564 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
1565 }
1566
VisitFloat64Neg(Node * node)1567 void InstructionSelector::VisitFloat64Neg(Node* node) {
1568 VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
1569 }
1570
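// Ieee754 operations are lowered to marked calls; their inputs and result
// live in fixed xmm registers across the call.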
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  X64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
       g.UseFixed(node->InputAt(1), xmm1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  X64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
      ->MarkAsCall();
}

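// For C function calls, arguments are poked into their fixed stack slots;
// for all other calls they are pushed in reverse order.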
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  X64OperandGenerator g(this);

  // Prepare for C function call.
  if (descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction |
             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node()) {
        int slot = static_cast<int>(n);
        InstructionOperand value = g.CanBeImmediate(input.node())
                                       ? g.UseImmediate(input.node())
                                       : g.UseRegister(input.node());
        Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
      }
    }
  } else {
    // Push any stack arguments.
    for (PushParameter input : base::Reversed(*arguments)) {
      // TODO(titzer): X64Push cannot handle stack->stack double moves
      // because there is no way to encode fixed double slots.
      InstructionOperand value =
          g.CanBeImmediate(input.node())
              ? g.UseImmediate(input.node())
              : IsSupported(ATOM) ||
                        sequence()->IsFP(GetVirtualRegister(input.node()))
                    ? g.UseRegister(input.node())
                    : g.Use(input.node());
      Emit(kX64Push, g.NoOutput(), value);
    }
  }
}


bool InstructionSelector::IsTailCallAddressImmediate() { return true; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

namespace {

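// Shared routine for a compare whose left operand is a load that can be
// folded into the instruction as a memory operand.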
void VisitCompareWithMemoryOperand(InstructionSelector* selector,
                                   InstructionCode opcode, Node* left,
                                   InstructionOperand right,
                                   FlagsContinuation* cont) {
  DCHECK(left->opcode() == IrOpcode::kLoad);
  X64OperandGenerator g(selector);
  size_t input_count = 0;
  InstructionOperand inputs[6];
  AddressingMode addressing_mode =
      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
  opcode |= AddressingModeField::encode(addressing_mode);
  opcode = cont->Encode(opcode);
  inputs[input_count++] = right;

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
    selector->Emit(opcode, 0, nullptr, input_count, inputs);
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
                             cont->reason(), cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    InstructionOperand output = g.DefineAsRegister(cont->result());
    selector->Emit(opcode, 1, &output, input_count, inputs);
  }
}

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  Node* left, Node* right, FlagsContinuation* cont,
                  bool commutative) {
  X64OperandGenerator g(selector);
  if (commutative && g.CanBeBetterLeftOperand(right)) {
    std::swap(left, right);
  }
  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}

// Tries to match the size of the given opcode to that of the operands, if
// possible.
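// For example, a kX64Cmp32 of two kWord8 loads can be narrowed to kX64Cmp8,
// with the continuation adjusted to unsigned conditions where needed.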
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                    Node* right, FlagsContinuation* cont) {
  // Currently, if one of the two operands is not a Load, we don't know what
  // its machine representation is, so we bail out.
  // TODO(epertoso): we can probably get some size information out of
  // immediates and phi nodes.
  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
    return opcode;
  }
  // If the load representations don't match, both operands will be
  // zero/sign-extended to 32bit.
  MachineType left_type = LoadRepresentationOf(left->op());
  MachineType right_type = LoadRepresentationOf(right->op());
  if (left_type == right_type) {
    switch (left_type.representation()) {
      case MachineRepresentation::kBit:
      case MachineRepresentation::kWord8: {
        if (opcode == kX64Test32) return kX64Test8;
        if (opcode == kX64Cmp32) {
          if (left_type.semantic() == MachineSemantic::kUint32) {
            cont->OverwriteUnsignedIfSigned();
          } else {
            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
          }
          return kX64Cmp8;
        }
        break;
      }
      case MachineRepresentation::kWord16:
        if (opcode == kX64Test32) return kX64Test16;
        if (opcode == kX64Cmp32) {
          if (left_type.semantic() == MachineSemantic::kUint32) {
            cont->OverwriteUnsignedIfSigned();
          } else {
            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
          }
          return kX64Cmp16;
        }
        break;
      default:
        break;
    }
  }
  return opcode;
}

// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  opcode = TryNarrowOpcodeSize(opcode, left, right, cont);

  // If one of the two inputs is an immediate, make sure it's on the right, or
  // if one of the two inputs is a memory operand, make sure it's on the left.
  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }

  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
      (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
       !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // Match immediates on right side of comparison.
  if (g.CanBeImmediate(right)) {
    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
      return VisitCompareWithMemoryOperand(selector, opcode, left,
                                           g.UseImmediate(right), cont);
    }
    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
                        cont);
  }

  // Match memory operands on left side of comparison.
  if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
    return VisitCompareWithMemoryOperand(selector, opcode, left,
                                         g.UseRegister(right), cont);
  }

  if (g.CanBeBetterLeftOperand(right)) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  return VisitCompare(selector, opcode, left, right, cont,
                      node->op()->HasProperty(Operator::kCommutative));
}

// Shared routine for 64-bit word comparison operations.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  if (selector->CanUseRootsRegister()) {
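    // If one operand is a known root, compare against its slot relative to
    // the root register instead of materializing the heap constant.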
    Heap* const heap = selector->isolate()->heap();
    Heap::RootListIndex root_index;
    HeapObjectBinopMatcher m(node);
    if (m.right().HasValue() &&
        heap->IsRootHandle(m.right().Value(), &root_index)) {
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode =
          kX64Cmp | AddressingModeField::encode(kMode_Root);
      return VisitCompare(
          selector, opcode,
          g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
          g.UseRegister(m.left().node()), cont);
    } else if (m.left().HasValue() &&
               heap->IsRootHandle(m.left().Value(), &root_index)) {
      InstructionCode opcode =
          kX64Cmp | AddressingModeField::encode(kMode_Root);
      return VisitCompare(
          selector, opcode,
          g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
          g.UseRegister(m.right().node()), cont);
    }
  }
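  // A comparison of a loaded stack limit against the stack pointer is folded
  // into a single kX64StackCheck instruction.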
  Int64BinopMatcher m(node);
  if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
    LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
    ExternalReference js_stack_limit =
        ExternalReference::address_of_stack_limit(selector->isolate());
    if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
      // Compare(Load(js_stack_limit), LoadStackPointer)
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode = cont->Encode(kX64StackCheck);
      if (cont->IsBranch()) {
        selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                       g.Label(cont->false_block()));
      } else if (cont->IsDeoptimize()) {
        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
                                 cont->frame_state());
      } else {
        DCHECK(cont->IsSet());
        selector->Emit(opcode, g.DefineAsRegister(cont->result()));
      }
      return;
    }
  }
  VisitWordCompare(selector, node, kX64Cmp, cont);
}


// Shared routine for comparison with zero.
void VisitCompareZero(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
}


// Shared routine for multiple float32 compare operations (inputs commuted).
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}


// Shared routine for multiple float64 compare operations (inputs commuted).
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}

// Shared routine for word comparison against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal &&
         selector->CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (selector->CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt64Sub:
                return VisitWord64Compare(selector, value, cont);
              case IrOpcode::kWord64And:
                return VisitWordCompare(selector, value, kX64Test, cont);
              default:
                break;
            }
          }
          return VisitCompareZero(selector, value, kX64Cmp, cont);
        }
        return VisitWord64Compare(selector, value, cont);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan: {
        Float64BinopMatcher m(value);
        if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
          // This matches the pattern
          //
          //   Float64LessThan(#0.0, Float64Abs(x))
          //
          // which TurboFan generates for NumberToBoolean in the general case,
          // and which evaluates to false if x is 0, -0 or NaN. We can compile
          // this to a simple (v)ucomisd using not_equal flags condition, which
          // avoids the costly Float64Abs.
          cont->OverwriteAndNegateIfEqual(kNotEqual);
          InstructionCode const opcode =
              selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
          return VisitCompare(selector, opcode, m.left().node(),
                              m.right().InputAt(0), cont, false);
        }
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat64Compare(selector, value, cont);
      }
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Add32, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Sub32, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Imul32, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Add, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Sub, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kInt64Sub:
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kX64Test32, cont);
      case IrOpcode::kWord64And:
        return VisitWordCompare(selector, value, kX64Test, cont);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare; emit a compare against 0.
  VisitCompareZero(selector, value, kX64Cmp32, cont);
}

}  // namespace

void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  X64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
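  // For example, 10 cases spanning a value range of 16 cost 20 table slots
  // vs. 23 lookup slots and 3 vs. 10 time units, so the heuristic below
  // picks the jump table.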
  if (sw.case_count > 4 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = g.TempRegister();
    if (sw.min_value) {
      // The leal automatically zero extends, so the result is a valid 64-bit
      // index.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
           value_operand, g.TempImmediate(-sw.min_value));
    } else {
      // Zero extend, because we use it as a 64-bit index into the jump table.
      Emit(kX64Movl, index_operand, value_operand);
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* value = m.left().node();

    // Try to combine with comparisons against 0 by simply inverting the
    // branch.
    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    }

    // Try to combine the branch with a comparison.
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kX64Cmp32, &cont);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kX64Test32, &cont);
        default:
          break;
      }
    }
    return VisitCompareZero(this, value, kX64Cmp32, &cont);
  }
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    // Try to combine the equality check with a comparison.
    Node* const user = m.node();
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt64Sub:
          return VisitWord64Compare(this, value, &cont);
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kX64Test, &cont);
        default:
          break;
      }
    }
  }
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add32, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub32, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThan(Node* node) {
  Float64BinopMatcher m(node);
  if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
    // This matches the pattern
    //
    //   Float64LessThan(#0.0, Float64Abs(x))
    //
    // which TurboFan generates for NumberToBoolean in the general case,
    // and which evaluates to false if x is 0, -0 or NaN. We can compile
    // this to a simple (v)ucomisd using not_equal flags condition, which
    // avoids the costly Float64Abs.
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, node);
    InstructionCode const opcode =
        IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
    return VisitCompare(this, opcode, m.left().node(), m.right().InputAt(0),
                        &cont, false);
  }
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
       g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
       g.Use(node->InputAt(0)));
}

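// When the float64 input is a constant whose upper 32 bits are zero,
// inserting the low word reduces to loading just the new low word.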
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Float64Matcher mleft(left);
  if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
    Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
    return;
  }
  Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}

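// Quiets a possibly-signaling NaN while leaving ordinary values unchanged.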
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)));
}

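// On x64, aligned word-sized loads are already atomic, so an atomic load
// lowers to an ordinary load.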
void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
         load_rep.representation() == MachineRepresentation::kWord16 ||
         load_rep.representation() == MachineRepresentation::kWord32);
  USE(load_rep);
  VisitLoad(node);
}

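// Atomic stores are lowered to xchg, whose implicit lock semantics provide
// the required ordering.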
void InstructionSelector::VisitAtomicStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kX64Xchgb;
      break;
    case MachineRepresentation::kWord16:
      opcode = kX64Xchgw;
      break;
    case MachineRepresentation::kWord32:
      opcode = kX64Xchgl;
      break;
    default:
      UNREACHABLE();
      return;
  }
  AddressingMode addressing_mode;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  if (g.CanBeImmediate(index)) {
    inputs[input_count++] = g.UseImmediate(index);
    addressing_mode = kMode_MRI;
  } else {
    inputs[input_count++] = g.UseUniqueRegister(index);
    addressing_mode = kMode_MR1;
  }
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}

void InstructionSelector::VisitCreateInt32x4(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Int32x4Create, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}

void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
}

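// Advertise only the machine operators whose lowering is supported here:
// rounding needs SSE4.1 and population count needs POPCNT.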
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags =
      MachineOperatorBuilder::kWord32ShiftIsSafe |
      MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
  if (CpuFeatures::IsSupported(POPCNT)) {
    flags |= MachineOperatorBuilder::kWord32Popcnt |
             MachineOperatorBuilder::kWord64Popcnt;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    flags |= MachineOperatorBuilder::kFloat32RoundDown |
             MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat32RoundUp |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat32RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat32RoundTiesEven |
             MachineOperatorBuilder::kFloat64RoundTiesEven;
  }
  return flags;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8