// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>

#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds X64-specific methods for generating operands.
class X64OperandGenerator final : public OperandGenerator {
 public:
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

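  // x64 instructions can only encode immediates that fit into a sign-extended
  // 32-bit field; wider 64-bit constants must be materialized into a register
  // first, so they only qualify here when they round-trip through int32.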
  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      case IrOpcode::kInt64Constant: {
        const int64_t value = OpParameter<int64_t>(node);
        return value == static_cast<int64_t>(static_cast<int32_t>(value));
      }
      case IrOpcode::kNumberConstant: {
        const double value = OpParameter<double>(node);
        return bit_cast<int64_t>(value) == 0;
      }
      default:
        return false;
    }
  }

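  // Builds the operand list for a memory access. The scale exponent (0-3)
  // selects the SIB scale factor (1, 2, 4 or 8); the resulting addressing
  // mode names its parts: M = memory operand with base register,
  // R + digit = index register scaled by that factor, I = immediate
  // displacement.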
  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                             Node* base, Node* displacement,
                                             InstructionOperand inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    if (base != nullptr) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != nullptr) {
        DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != nullptr) {
          inputs[(*input_count)++] = UseImmediate(displacement);
          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                       kMode_MR4I, kMode_MR8I};
          mode = kMRnI_modes[scale_exponent];
        } else {
          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                      kMode_MR4, kMode_MR8};
          mode = kMRn_modes[scale_exponent];
        }
      } else {
        if (displacement == nullptr) {
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = UseImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      DCHECK_NOT_NULL(index);
      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
      inputs[(*input_count)++] = UseRegister(index);
      if (displacement != nullptr) {
        inputs[(*input_count)++] = UseImmediate(displacement);
        static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                    kMode_M4I, kMode_M8I};
        mode = kMnI_modes[scale_exponent];
      } else {
        static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
                                                   kMode_M4, kMode_M8};
        mode = kMn_modes[scale_exponent];
        if (mode == kMode_MR1) {
          // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
          inputs[(*input_count)++] = UseRegister(index);
        }
      }
    }
    return mode;
  }

  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                  InstructionOperand inputs[],
                                                  size_t* input_count) {
    BaseWithIndexAndDisplacement64Matcher m(operand, true);
    DCHECK(m.matches());
    if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
                                         m.displacement(), inputs, input_count);
    } else {
      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
      return kMode_MR1;
    }
  }

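  // A node that is no longer live after this use can safely be clobbered,
  // which makes it a good candidate for the DefineSameAsFirst left operand
  // of a two-address instruction.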
  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};


void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  X64OperandGenerator g(this);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kX64Movss;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kX64Movsd;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
      break;
    case MachineRepresentation::kWord32:
      opcode = kX64Movl;
      break;
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kX64Movq;
      break;
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  Emit(code, 1, outputs, input_count, inputs);
}


void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK_EQ(MachineRepresentation::kTagged, rep);
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    if (g.CanBeImmediate(index)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MR1;
    }
    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                ? g.UseRegister(value)
                                : g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kX64Movss;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kX64Movsd;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kX64Movb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kX64Movw;
        break;
      case MachineRepresentation::kWord32:
        opcode = kX64Movl;
        break;
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kX64Movq;
        break;
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }
    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    InstructionOperand value_operand =
        g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
    inputs[input_count++] = value_operand;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}


void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedLoadWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
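  // If the offset is (index + constant) with 0 <= constant <= length, the
  // constant part can be folded into the instruction as an immediate while
  // still keeping the access in bounds.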
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
      return;
    }
  }
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.TempImmediate(0), length_operand);
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedStoreWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
           value_operand);
      return;
    }
  }
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
       g.TempImmediate(0), length_operand, value_operand);
}


// Shared routine for multiple binary operations.
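// Most x64 ALU instructions are two-address (dst = dst op src), which is why
// the output below is constrained to the same register as the first input.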
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
                 inputs);
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitWord32And(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(0xff)) {
    Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else if (m.right().Is(0xffff)) {
    Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64And32);
  }
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor32);
  }
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor);
  }
}


namespace {

// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
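// On x64 a variable shift count must live in the cl register, hence the
// UseFixed(right, rcx) constraint for non-immediate counts.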
void VisitWord32Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
void VisitWord64Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
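    // The hardware masks 64-bit shift counts to 6 bits anyway, so a count of
    // the form (x & 0x3F) can use x directly.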
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


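// Emits an lea instruction, which computes base + index * 2^scale +
// displacement in a single instruction, with a free choice of destination
// register and without clobbering the flags.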
void EmitLea(InstructionSelector* selector, InstructionCode opcode,
             Node* result, Node* index, int scale, Node* base,
             Node* displacement) {
  X64OperandGenerator g(selector);

  InstructionOperand inputs[4];
  size_t input_count = 0;
  AddressingMode mode = g.GenerateMemoryOperandInputs(
      index, scale, base, displacement, inputs, &input_count);

  DCHECK_NE(0u, input_count);
  DCHECK_GE(arraysize(inputs), input_count);

  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(result);

  opcode = AddressingModeField::encode(mode) | opcode;

  selector->Emit(opcode, 1, outputs, input_count, inputs);
}

}  // namespace


void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
    return;
  }
  VisitWord32Shift(this, node, kX64Shl32);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kX64Shl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  VisitWord64Shift(this, node, kX64Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitWord64Shift(this, node, kX64Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    }
  }
  VisitWord32Shift(this, node, kX64Sar32);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitWord64Shift(this, node, kX64Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}


void InstructionSelector::VisitWord64Clz(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Clz(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Ctz(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Ctz(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Tzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Popcnt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Popcnt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitInt32Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leal pattern.
  BaseWithIndexAndDisplacement32Matcher m(node);
  if (m.matches() &&
      (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
            m.displacement());
    return;
  }

  // No leal pattern match, use addl.
  VisitBinop(this, node, kX64Add32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add);
}


void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add, &cont);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
      // Turn subtractions of constant values into immediate "leal"
      // instructions by negating the value.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(-m.right().Value()));
      return;
    }
    VisitBinop(this, node, kX64Sub32);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub);
  }
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub, &cont);
}


namespace {

void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else {
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}


void VisitMulHigh(InstructionSelector* selector, Node* node,
                  ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (selector->IsLive(left) && !selector->IsLive(right)) {
    std::swap(left, right);
  }
  // TODO(turbofan): We use UseUniqueRegister here to improve register
  // allocation.
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right));
}


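// x64 div/idiv take one explicit operand and implicitly use rax (dividend /
// quotient) and rdx (remainder), so those registers have to be fixed or
// reserved as a temp here.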
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}


void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsFixed(node, rdx),
                 g.UseFixed(node->InputAt(0), rax),
                 g.UseUniqueRegister(node->InputAt(1)));
}

}  // namespace


void InstructionSelector::VisitInt32Mul(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
    return;
  }
  VisitMul(this, node, kX64Imul32);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64ImulHigh32);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64UmulHigh32);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
      // the zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    default:
      break;
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}


namespace {

void VisitRO(InstructionSelector* selector, Node* node,
             InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void VisitRR(InstructionSelector* selector, Node* node,
             InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


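// AVX instructions have three-operand forms, so the result can go to any
// register; the SSE encodings are two-operand and overwrite their first
// input, hence the DefineSameAsFirst constraint on the non-AVX path.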
void VisitFloatBinop(InstructionSelector* selector, Node* node,
                     ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
  InstructionOperand operand1 = g.Use(node->InputAt(1));
  if (selector->IsSupported(AVX)) {
    selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
  } else {
    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
  }
}


void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
                    ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
  X64OperandGenerator g(selector);
  if (selector->IsSupported(AVX)) {
    selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
  } else {
    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
  }
}

}  // namespace


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRO(this, node, kSSEFloat64ToFloat32);
}


void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
  switch (TruncationModeOf(node->op())) {
    case TruncationMode::kJavaScript:
      return VisitRR(this, node, kArchTruncateDoubleToI);
    case TruncationMode::kRoundToZero:
      return VisitRO(this, node, kSSEFloat64ToInt32);
  }
  UNREACHABLE();
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shr: {
        Int64BinopMatcher m(value);
        if (m.right().Is(32)) {
          Emit(kX64Shr, g.DefineSameAsFirst(node),
               g.UseRegister(m.left().node()), g.TempImmediate(32));
          return;
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}


void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEUint64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
       arraysize(temps), temps);
}


void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEUint64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
       arraysize(temps), temps);
}


void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}


void InstructionSelector::VisitFloat32Sub(Node* node) {
  X64OperandGenerator g(this);
  Float32BinopMatcher m(node);
  if (m.left().IsMinusZero()) {
    VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
                   kSSEFloat32Neg);
    return;
  }
  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}


void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
}


void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Max, kSSEFloat32Max);
}


void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Min, kSSEFloat32Min);
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}


void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRO(this, node, kSSEFloat32Sqrt);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  Float64BinopMatcher m(node);
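  // A subtraction from -0.0 is a negation; additionally, the pattern
  // -0.0 - RoundDown(-0.0 - x) folds to RoundUp(x).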
  if (m.left().IsMinusZero()) {
    if (m.right().IsFloat64RoundDown() &&
        CanCover(m.node(), m.right().node())) {
      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
          CanCover(m.right().node(), m.right().InputAt(0))) {
        Float64BinopMatcher mright0(m.right().InputAt(0));
        if (mright0.left().IsMinusZero()) {
          Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
               g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
          return;
        }
      }
    }
    VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
                   kSSEFloat64Neg);
    return;
  }
  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}


void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Max, kSSEFloat64Max);
}


void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Min, kSSEFloat64Min);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRO(this, node, kSSEFloat64Sqrt);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
}


void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  X64OperandGenerator g(this);

  // Prepare for C function call.
  if (descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction |
             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node()) {
        int slot = static_cast<int>(n);
        InstructionOperand value = g.CanBeImmediate(input.node())
                                       ? g.UseImmediate(input.node())
                                       : g.UseRegister(input.node());
        Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
      }
    }
  } else {
    // Push any stack arguments.
    for (PushParameter input : base::Reversed(*arguments)) {
      // TODO(titzer): X64Push cannot handle stack->stack double moves
      // because there is no way to encode fixed double slots.
      InstructionOperand value =
          g.CanBeImmediate(input.node())
              ? g.UseImmediate(input.node())
              : IsSupported(ATOM) ||
                        sequence()->IsFloat(GetVirtualRegister(input.node()))
                    ? g.UseRegister(input.node())
                    : g.Use(input.node());
      Emit(kX64Push, g.NoOutput(), value);
    }
  }
}


bool InstructionSelector::IsTailCallAddressImmediate() { return true; }


namespace {

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  Node* left, Node* right, FlagsContinuation* cont,
                  bool commutative) {
  X64OperandGenerator g(selector);
  if (commutative && g.CanBeBetterLeftOperand(right)) {
    std::swap(left, right);
  }
  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}


// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, left, right, cont,
                 node->op()->HasProperty(Operator::kCommutative));
  }
}


// Shared routine for 64-bit word comparison operations.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
    LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
    ExternalReference js_stack_limit =
        ExternalReference::address_of_stack_limit(selector->isolate());
    if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
      // Compare(Load(js_stack_limit), LoadStackPointer)
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode = cont->Encode(kX64StackCheck);
      if (cont->IsBranch()) {
        selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                       g.Label(cont->false_block()));
      } else {
        DCHECK(cont->IsSet());
        selector->Emit(opcode, g.DefineAsRegister(cont->result()));
      }
      return;
    }
  }
  VisitWordCompare(selector, node, kX64Cmp, cont);
}


// Shared routine for comparison with zero.
void VisitCompareZero(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
}


// Shared routine for multiple float32 compare operations (inputs commuted).
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
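  // The operands are swapped so that "left < right" can be tested via the
  // "above" condition on (right, left), which is false on unordered (NaN)
  // inputs.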
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}


// Shared routine for multiple float64 compare operations (inputs commuted).
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}

}  // namespace


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  X64OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
    Int32BinopMatcher m(value);
    if (m.right().Is(0)) {
      user = value;
      value = m.left().node();
      cont.Negate();
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kWord64Equal: {
        cont.OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt64Sub:
                return VisitWord64Compare(this, value, &cont);
              case IrOpcode::kWord64And:
                return VisitWordCompare(this, value, kX64Test, &cont);
              default:
                break;
            }
          }
          return VisitCompareZero(this, value, kX64Cmp, &cont);
        }
        return VisitWord64Compare(this, value, &cont);
      }
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, &cont);
      case IrOpcode::kUint64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, &cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, &cont);
      case IrOpcode::kFloat32Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat32Compare(this, value, &cont);
      case IrOpcode::kFloat32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat32Compare(this, value, &cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat32Compare(this, value, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Add32, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Sub32, &cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Add, &cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Sub, &cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt64Sub:
        return VisitWord64Compare(this, value, &cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(this, value, kX64Test32, &cont);
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kX64Test, &cont);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  VisitCompareZero(this, value, kX64Cmp32, &cont);
}


void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  X64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

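  // Compare the estimated size and speed of a jump table against a linear
  // chain of compares, weighting time three to one, and pick the cheaper form.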
1555 // Emit either ArchTableSwitch or ArchLookupSwitch.
1556 size_t table_space_cost = 4 + sw.value_range;
1557 size_t table_time_cost = 3;
1558 size_t lookup_space_cost = 3 + 2 * sw.case_count;
1559 size_t lookup_time_cost = sw.case_count;
1560 if (sw.case_count > 4 &&
1561 table_space_cost + 3 * table_time_cost <=
1562 lookup_space_cost + 3 * lookup_time_cost &&
1563 sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = g.TempRegister();
    if (sw.min_value) {
      // The leal automatically zero extends, so the result is a valid
      // 64-bit index.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
           value_operand, g.TempImmediate(-sw.min_value));
    } else {
      // Zero extend, because we use it as a 64-bit index into the jump table.
      Emit(kX64Movl, index_operand, value_operand);
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* value = m.left().node();

    // Try to combine with comparisons against 0 by inverting the continuation.
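    // For example, Word32Equal(Word32Equal(x, 0), 0) collapses into a
    // single comparison of x against zero, with the condition negated
    // once per stripped layer.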
    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    }

    // Try to combine the equality check with a comparison.
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kX64Cmp32, &cont);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kX64Test32, &cont);
        default:
          break;
      }
    }
    return VisitCompareZero(this, value, kX64Cmp32, &cont);
  }
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    // Try to combine the equality check with a comparison.
    Node* const user = m.node();
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt64Sub:
          return VisitWord64Compare(this, value, &cont);
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kX64Test, &cont);
        default:
          break;
      }
    }
  }
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
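  // If the overflow projection is used, select the add together with a
  // flags continuation that consumes the overflow bit; otherwise a plain
  // 32-bit add is sufficient.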
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add32, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub32, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont(kUnorderedEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedGreaterThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kUnorderedEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedGreaterThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
       g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
       g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Float64Matcher mleft(left);
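  // If the high word of the constant left operand is already zero,
  // inserting the low word amounts to a zero-extending load of the new
  // low word.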
  if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
    Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
    return;
  }
  Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}


// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags =
      MachineOperatorBuilder::kFloat32Max |
      MachineOperatorBuilder::kFloat32Min |
      MachineOperatorBuilder::kFloat64Max |
      MachineOperatorBuilder::kFloat64Min |
      MachineOperatorBuilder::kWord32ShiftIsSafe |
      MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
  if (CpuFeatures::IsSupported(POPCNT)) {
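    // popcntl/popcntq are only available with the POPCNT feature.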
    flags |= MachineOperatorBuilder::kWord32Popcnt |
             MachineOperatorBuilder::kWord64Popcnt;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
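    // The rounding instructions (roundss/roundsd) require SSE4.1.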
    flags |= MachineOperatorBuilder::kFloat32RoundDown |
             MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat32RoundUp |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat32RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat32RoundTiesEven |
             MachineOperatorBuilder::kFloat64RoundTiesEven;
  }
  return flags;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8