1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/instruction-selector-impl.h"
6 #include "src/compiler/node-matchers.h"
7 #include "src/compiler/node-properties.h"
8 
9 namespace v8 {
10 namespace internal {
11 namespace compiler {
12 
13 enum ImmediateMode {
14   kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
15   kShift32Imm,     // 0 - 31
16   kShift64Imm,     // 0 - 63
17   kLogical32Imm,
18   kLogical64Imm,
19   kLoadStoreImm8,   // signed 8 bit or 12 bit unsigned scaled by access size
20   kLoadStoreImm16,
21   kLoadStoreImm32,
22   kLoadStoreImm64,
23   kNoImmediate
24 };
25 
26 
27 // Adds Arm64-specific methods for generating operands.
28 class Arm64OperandGenerator final : public OperandGenerator {
29  public:
30   explicit Arm64OperandGenerator(InstructionSelector* selector)
31       : OperandGenerator(selector) {}
32 
33   InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
34     if (CanBeImmediate(node, mode)) {
35       return UseImmediate(node);
36     }
37     return UseRegister(node);
38   }
39 
40   // Use the zero register if the node has the immediate value zero, otherwise
41   // assign a register.
42   InstructionOperand UseRegisterOrImmediateZero(Node* node) {
43     if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
44       return UseImmediate(node);
45     }
46     return UseRegister(node);
47   }
48 
49   // Use the provided node if it has the required value, or create a
50   // TempImmediate otherwise.
51   InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
52     if (GetIntegerConstantValue(node) == value) {
53       return UseImmediate(node);
54     }
55     return TempImmediate(value);
56   }
57 
58   bool IsIntegerConstant(Node* node) {
59     return (node->opcode() == IrOpcode::kInt32Constant) ||
60            (node->opcode() == IrOpcode::kInt64Constant);
61   }
62 
63   int64_t GetIntegerConstantValue(Node* node) {
64     if (node->opcode() == IrOpcode::kInt32Constant) {
65       return OpParameter<int32_t>(node);
66     }
67     DCHECK(node->opcode() == IrOpcode::kInt64Constant);
68     return OpParameter<int64_t>(node);
69   }
70 
71   bool CanBeImmediate(Node* node, ImmediateMode mode) {
72     return IsIntegerConstant(node) &&
73            CanBeImmediate(GetIntegerConstantValue(node), mode);
74   }
75 
76   bool CanBeImmediate(int64_t value, ImmediateMode mode) {
77     unsigned ignored;
78     switch (mode) {
79       case kLogical32Imm:
80         // TODO(dcarney): some unencodable values can be handled by
81         // switching instructions.
82         return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
83                                        &ignored, &ignored, &ignored);
84       case kLogical64Imm:
85         return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
86                                        &ignored, &ignored, &ignored);
87       case kArithmeticImm:
88         return Assembler::IsImmAddSub(value);
89       case kLoadStoreImm8:
90         return IsLoadStoreImmediate(value, LSByte);
91       case kLoadStoreImm16:
92         return IsLoadStoreImmediate(value, LSHalfword);
93       case kLoadStoreImm32:
94         return IsLoadStoreImmediate(value, LSWord);
95       case kLoadStoreImm64:
96         return IsLoadStoreImmediate(value, LSDoubleWord);
97       case kNoImmediate:
98         return false;
99       case kShift32Imm:  // Fall through.
100       case kShift64Imm:
101         // Shift operations only observe the bottom 5 or 6 bits of the value.
102         // All possible shifts can be encoded by discarding bits which have no
103         // effect.
104         return true;
105     }
106     return false;
107   }
108 
109  private:
110   bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
111     return Assembler::IsImmLSScaled(value, size) ||
112            Assembler::IsImmLSUnscaled(value);
113   }
114 };
115 
116 
117 namespace {
118 
119 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
120   Arm64OperandGenerator g(selector);
121   selector->Emit(opcode, g.DefineAsRegister(node),
122                  g.UseRegister(node->InputAt(0)));
123 }
124 
125 
126 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
127   Arm64OperandGenerator g(selector);
128   selector->Emit(opcode, g.DefineAsRegister(node),
129                  g.UseRegister(node->InputAt(0)),
130                  g.UseRegister(node->InputAt(1)));
131 }
132 
133 
134 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
135               ImmediateMode operand_mode) {
136   Arm64OperandGenerator g(selector);
137   selector->Emit(opcode, g.DefineAsRegister(node),
138                  g.UseRegister(node->InputAt(0)),
139                  g.UseOperand(node->InputAt(1), operand_mode));
140 }
141 
142 
143 bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
144                       Node* input_node, InstructionCode* opcode, bool try_ror) {
145   Arm64OperandGenerator g(selector);
146 
147   if (!selector->CanCover(node, input_node)) return false;
148   if (input_node->InputCount() != 2) return false;
149   if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;
150 
151   switch (input_node->opcode()) {
152     case IrOpcode::kWord32Shl:
153     case IrOpcode::kWord64Shl:
154       *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
155       return true;
156     case IrOpcode::kWord32Shr:
157     case IrOpcode::kWord64Shr:
158       *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
159       return true;
160     case IrOpcode::kWord32Sar:
161     case IrOpcode::kWord64Sar:
162       *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
163       return true;
164     case IrOpcode::kWord32Ror:
165     case IrOpcode::kWord64Ror:
166       if (try_ror) {
167         *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
168         return true;
169       }
170       return false;
171     default:
172       return false;
173   }
174 }
175 
176 
177 bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
178                        Node* node, Node* left_node, Node* right_node,
179                        InstructionOperand* left_op,
180                        InstructionOperand* right_op, InstructionCode* opcode) {
181   if (!selector->CanCover(node, right_node)) return false;
182 
183   NodeMatcher nm(right_node);
184 
185   if (nm.IsWord32And()) {
186     Int32BinopMatcher mright(right_node);
187     if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
188       int32_t mask = mright.right().Value();
189       *left_op = g->UseRegister(left_node);
190       *right_op = g->UseRegister(mright.left().node());
191       *opcode |= AddressingModeField::encode(
192           (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
193       return true;
194     }
195   } else if (nm.IsWord32Sar()) {
196     Int32BinopMatcher mright(right_node);
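    // The pattern (x << 16) >> 16 is a 16-bit sign extension and maps to an
    // SXTH extended-register operand; (x << 24) >> 24 maps to SXTB.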
197     if (selector->CanCover(mright.node(), mright.left().node()) &&
198         mright.left().IsWord32Shl()) {
199       Int32BinopMatcher mleft_of_right(mright.left().node());
200       if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
201           (mright.right().Is(24) && mleft_of_right.right().Is(24))) {
202         int32_t shift = mright.right().Value();
203         *left_op = g->UseRegister(left_node);
204         *right_op = g->UseRegister(mleft_of_right.left().node());
205         *opcode |= AddressingModeField::encode(
206             (shift == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH);
207         return true;
208       }
209     }
210   }
211   return false;
212 }
213 
214 
215 // Shared routine for multiple binary operations.
216 template <typename Matcher>
217 void VisitBinop(InstructionSelector* selector, Node* node,
218                 InstructionCode opcode, ImmediateMode operand_mode,
219                 FlagsContinuation* cont) {
220   Arm64OperandGenerator g(selector);
221   Matcher m(node);
222   InstructionOperand inputs[5];
223   size_t input_count = 0;
224   InstructionOperand outputs[2];
225   size_t output_count = 0;
226   bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
227 
228   // We can commute cmp by switching the inputs and commuting the flags
229   // continuation.
230   bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
231 
232   // The cmp and cmn instructions are encoded as sub or add with zero output
233   // register, and therefore support the same operand modes.
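  // (On ARM64, cmp is an alias of subs with the zero register as destination,
  // and cmn an alias of adds.)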
234   bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
235                     m.IsInt64Sub() || is_cmp;
236 
237   Node* left_node = m.left().node();
238   Node* right_node = m.right().node();
239 
240   if (g.CanBeImmediate(right_node, operand_mode)) {
241     inputs[input_count++] = g.UseRegister(left_node);
242     inputs[input_count++] = g.UseImmediate(right_node);
243   } else if (is_cmp && g.CanBeImmediate(left_node, operand_mode)) {
244     cont->Commute();
245     inputs[input_count++] = g.UseRegister(right_node);
246     inputs[input_count++] = g.UseImmediate(left_node);
247   } else if (is_add_sub &&
248              TryMatchAnyExtend(&g, selector, node, left_node, right_node,
249                                &inputs[0], &inputs[1], &opcode)) {
250     input_count += 2;
251   } else if (is_add_sub && can_commute &&
252              TryMatchAnyExtend(&g, selector, node, right_node, left_node,
253                                &inputs[0], &inputs[1], &opcode)) {
254     if (is_cmp) cont->Commute();
255     input_count += 2;
256   } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
257                               !is_add_sub)) {
258     Matcher m_shift(right_node);
259     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
260     inputs[input_count++] = g.UseRegister(m_shift.left().node());
261     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
262   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
263                                              !is_add_sub)) {
264     if (is_cmp) cont->Commute();
265     Matcher m_shift(left_node);
266     inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
267     inputs[input_count++] = g.UseRegister(m_shift.left().node());
268     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
269   } else {
270     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
271     inputs[input_count++] = g.UseRegister(right_node);
272   }
273 
274   if (cont->IsBranch()) {
275     inputs[input_count++] = g.Label(cont->true_block());
276     inputs[input_count++] = g.Label(cont->false_block());
277   }
278 
279   if (!is_cmp) {
280     outputs[output_count++] = g.DefineAsRegister(node);
281   }
282 
283   if (cont->IsSet()) {
284     outputs[output_count++] = g.DefineAsRegister(cont->result());
285   }
286 
287   DCHECK_NE(0u, input_count);
288   DCHECK((output_count != 0) || is_cmp);
289   DCHECK_GE(arraysize(inputs), input_count);
290   DCHECK_GE(arraysize(outputs), output_count);
291 
292   selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
293                  inputs);
294 }
295 
296 
297 // Shared routine for multiple binary operations.
298 template <typename Matcher>
299 void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
300                 ImmediateMode operand_mode) {
301   FlagsContinuation cont;
302   VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
303 }
304 
305 
306 template <typename Matcher>
307 void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
308                  ArchOpcode negate_opcode) {
309   Arm64OperandGenerator g(selector);
310   Matcher m(node);
311   if (m.right().HasValue() && (m.right().Value() < 0) &&
312       g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
313     selector->Emit(negate_opcode, g.DefineAsRegister(node),
314                    g.UseRegister(m.left().node()),
315                    g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
316   } else {
317     VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
318   }
319 }
320 
321 
322 // For multiplications by immediate of the form x * (2^k + 1), where k > 0,
323 // return the value of k, otherwise return zero. This is used to reduce the
324 // multiplication to addition with left shift: x + (x << k).
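// For example, x * 9 == x + (x << 3), so a right operand of 9 yields k == 3.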
325 template <typename Matcher>
326 int32_t LeftShiftForReducedMultiply(Matcher* m) {
327   DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
328   if (m->right().HasValue() && m->right().Value() >= 3) {
329     uint64_t value_minus_one = m->right().Value() - 1;
330     if (base::bits::IsPowerOfTwo64(value_minus_one)) {
331       return WhichPowerOf2_64(value_minus_one);
332     }
333   }
334   return 0;
335 }
336 
337 }  // namespace
338 
339 
340 void InstructionSelector::VisitLoad(Node* node) {
341   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
342   Arm64OperandGenerator g(this);
343   Node* base = node->InputAt(0);
344   Node* index = node->InputAt(1);
345   ArchOpcode opcode = kArchNop;
346   ImmediateMode immediate_mode = kNoImmediate;
347   switch (load_rep.representation()) {
348     case MachineRepresentation::kFloat32:
349       opcode = kArm64LdrS;
350       immediate_mode = kLoadStoreImm32;
351       break;
352     case MachineRepresentation::kFloat64:
353       opcode = kArm64LdrD;
354       immediate_mode = kLoadStoreImm64;
355       break;
356     case MachineRepresentation::kBit:  // Fall through.
357     case MachineRepresentation::kWord8:
358       opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
359       immediate_mode = kLoadStoreImm8;
360       break;
361     case MachineRepresentation::kWord16:
362       opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
363       immediate_mode = kLoadStoreImm16;
364       break;
365     case MachineRepresentation::kWord32:
366       opcode = kArm64LdrW;
367       immediate_mode = kLoadStoreImm32;
368       break;
369     case MachineRepresentation::kTagged:  // Fall through.
370     case MachineRepresentation::kWord64:
371       opcode = kArm64Ldr;
372       immediate_mode = kLoadStoreImm64;
373       break;
374     case MachineRepresentation::kNone:
375       UNREACHABLE();
376       return;
377   }
378   if (g.CanBeImmediate(index, immediate_mode)) {
379     Emit(opcode | AddressingModeField::encode(kMode_MRI),
380          g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
381   } else {
382     Emit(opcode | AddressingModeField::encode(kMode_MRR),
383          g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
384   }
385 }
386 
387 
388 void InstructionSelector::VisitStore(Node* node) {
389   Arm64OperandGenerator g(this);
390   Node* base = node->InputAt(0);
391   Node* index = node->InputAt(1);
392   Node* value = node->InputAt(2);
393 
394   StoreRepresentation store_rep = StoreRepresentationOf(node->op());
395   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
396   MachineRepresentation rep = store_rep.representation();
397 
398   // TODO(arm64): I guess this could be done in a better way.
399   if (write_barrier_kind != kNoWriteBarrier) {
400     DCHECK_EQ(MachineRepresentation::kTagged, rep);
401     InstructionOperand inputs[3];
402     size_t input_count = 0;
403     inputs[input_count++] = g.UseUniqueRegister(base);
404     inputs[input_count++] = g.UseUniqueRegister(index);
405     inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
406                                 ? g.UseRegister(value)
407                                 : g.UseUniqueRegister(value);
408     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
409     switch (write_barrier_kind) {
410       case kNoWriteBarrier:
411         UNREACHABLE();
412         break;
413       case kMapWriteBarrier:
414         record_write_mode = RecordWriteMode::kValueIsMap;
415         break;
416       case kPointerWriteBarrier:
417         record_write_mode = RecordWriteMode::kValueIsPointer;
418         break;
419       case kFullWriteBarrier:
420         record_write_mode = RecordWriteMode::kValueIsAny;
421         break;
422     }
423     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
424     size_t const temp_count = arraysize(temps);
425     InstructionCode code = kArchStoreWithWriteBarrier;
426     code |= MiscField::encode(static_cast<int>(record_write_mode));
427     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
428   } else {
429     ArchOpcode opcode = kArchNop;
430     ImmediateMode immediate_mode = kNoImmediate;
431     switch (rep) {
432       case MachineRepresentation::kFloat32:
433         opcode = kArm64StrS;
434         immediate_mode = kLoadStoreImm32;
435         break;
436       case MachineRepresentation::kFloat64:
437         opcode = kArm64StrD;
438         immediate_mode = kLoadStoreImm64;
439         break;
440       case MachineRepresentation::kBit:  // Fall through.
441       case MachineRepresentation::kWord8:
442         opcode = kArm64Strb;
443         immediate_mode = kLoadStoreImm8;
444         break;
445       case MachineRepresentation::kWord16:
446         opcode = kArm64Strh;
447         immediate_mode = kLoadStoreImm16;
448         break;
449       case MachineRepresentation::kWord32:
450         opcode = kArm64StrW;
451         immediate_mode = kLoadStoreImm32;
452         break;
453       case MachineRepresentation::kTagged:  // Fall through.
454       case MachineRepresentation::kWord64:
455         opcode = kArm64Str;
456         immediate_mode = kLoadStoreImm64;
457         break;
458       case MachineRepresentation::kNone:
459         UNREACHABLE();
460         return;
461     }
462     if (g.CanBeImmediate(index, immediate_mode)) {
463       Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
464            g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
465     } else {
466       Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
467            g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
468     }
469   }
470 }
471 
472 
473 void InstructionSelector::VisitCheckedLoad(Node* node) {
474   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
475   Arm64OperandGenerator g(this);
476   Node* const buffer = node->InputAt(0);
477   Node* const offset = node->InputAt(1);
478   Node* const length = node->InputAt(2);
479   ArchOpcode opcode = kArchNop;
480   switch (load_rep.representation()) {
481     case MachineRepresentation::kWord8:
482       opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
483       break;
484     case MachineRepresentation::kWord16:
485       opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
486       break;
487     case MachineRepresentation::kWord32:
488       opcode = kCheckedLoadWord32;
489       break;
490     case MachineRepresentation::kWord64:
491       opcode = kCheckedLoadWord64;
492       break;
493     case MachineRepresentation::kFloat32:
494       opcode = kCheckedLoadFloat32;
495       break;
496     case MachineRepresentation::kFloat64:
497       opcode = kCheckedLoadFloat64;
498       break;
499     case MachineRepresentation::kBit:     // Fall through.
500     case MachineRepresentation::kTagged:  // Fall through.
501     case MachineRepresentation::kNone:
502       UNREACHABLE();
503       return;
504   }
505   Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
506        g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
507 }
508 
509 
510 void InstructionSelector::VisitCheckedStore(Node* node) {
511   MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
512   Arm64OperandGenerator g(this);
513   Node* const buffer = node->InputAt(0);
514   Node* const offset = node->InputAt(1);
515   Node* const length = node->InputAt(2);
516   Node* const value = node->InputAt(3);
517   ArchOpcode opcode = kArchNop;
518   switch (rep) {
519     case MachineRepresentation::kWord8:
520       opcode = kCheckedStoreWord8;
521       break;
522     case MachineRepresentation::kWord16:
523       opcode = kCheckedStoreWord16;
524       break;
525     case MachineRepresentation::kWord32:
526       opcode = kCheckedStoreWord32;
527       break;
528     case MachineRepresentation::kWord64:
529       opcode = kCheckedStoreWord64;
530       break;
531     case MachineRepresentation::kFloat32:
532       opcode = kCheckedStoreFloat32;
533       break;
534     case MachineRepresentation::kFloat64:
535       opcode = kCheckedStoreFloat64;
536       break;
537     case MachineRepresentation::kBit:     // Fall through.
538     case MachineRepresentation::kTagged:  // Fall through.
539     case MachineRepresentation::kNone:
540       UNREACHABLE();
541       return;
542   }
543   Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
544        g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
545 }
546 
547 
548 template <typename Matcher>
549 static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
550                          ArchOpcode opcode, bool left_can_cover,
551                          bool right_can_cover, ImmediateMode imm_mode) {
552   Arm64OperandGenerator g(selector);
553 
554   // Map instruction to equivalent operation with inverted right input.
555   ArchOpcode inv_opcode = opcode;
556   switch (opcode) {
557     case kArm64And32:
558       inv_opcode = kArm64Bic32;
559       break;
560     case kArm64And:
561       inv_opcode = kArm64Bic;
562       break;
563     case kArm64Or32:
564       inv_opcode = kArm64Orn32;
565       break;
566     case kArm64Or:
567       inv_opcode = kArm64Orn;
568       break;
569     case kArm64Eor32:
570       inv_opcode = kArm64Eon32;
571       break;
572     case kArm64Eor:
573       inv_opcode = kArm64Eon;
574       break;
575     default:
576       UNREACHABLE();
577   }
578 
579   // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
580   if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
581     Matcher mleft(m->left().node());
582     if (mleft.right().Is(-1)) {
583       // TODO(all): support shifted operand on right.
584       selector->Emit(inv_opcode, g.DefineAsRegister(node),
585                      g.UseRegister(m->right().node()),
586                      g.UseRegister(mleft.left().node()));
587       return;
588     }
589   }
590 
591   // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
592   if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
593       right_can_cover) {
594     Matcher mright(m->right().node());
595     if (mright.right().Is(-1)) {
596       // TODO(all): support shifted operand on right.
597       selector->Emit(inv_opcode, g.DefineAsRegister(node),
598                      g.UseRegister(m->left().node()),
599                      g.UseRegister(mright.left().node()));
600       return;
601     }
602   }
603 
604   if (m->IsWord32Xor() && m->right().Is(-1)) {
605     selector->Emit(kArm64Not32, g.DefineAsRegister(node),
606                    g.UseRegister(m->left().node()));
607   } else if (m->IsWord64Xor() && m->right().Is(-1)) {
608     selector->Emit(kArm64Not, g.DefineAsRegister(node),
609                    g.UseRegister(m->left().node()));
610   } else {
611     VisitBinop<Matcher>(selector, node, opcode, imm_mode);
612   }
613 }
614 
615 
616 void InstructionSelector::VisitWord32And(Node* node) {
617   Arm64OperandGenerator g(this);
618   Int32BinopMatcher m(node);
619   if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
620       m.right().HasValue()) {
621     uint32_t mask = m.right().Value();
622     uint32_t mask_width = base::bits::CountPopulation32(mask);
623     uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
624     if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
625       // The mask must be contiguous, and occupy the least-significant bits.
626       DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
627 
628       // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
629       // significant bits.
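      // For example, And(Shr(x, 8), 0xff) becomes Ubfx(x, 8, 8): extract 8
      // bits starting at bit 8.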
630       Int32BinopMatcher mleft(m.left().node());
631       if (mleft.right().HasValue()) {
632         // Any shift value can match; int32 shifts use `value % 32`.
633         uint32_t lsb = mleft.right().Value() & 0x1f;
634 
635         // Ubfx cannot extract bits past the register size, however since
636         // shifting the original value would have introduced some zeros we can
637         // still use ubfx with a smaller mask and the remaining bits will be
638         // zeros.
639         if (lsb + mask_width > 32) mask_width = 32 - lsb;
640 
641         Emit(kArm64Ubfx32, g.DefineAsRegister(node),
642              g.UseRegister(mleft.left().node()),
643              g.UseImmediateOrTemp(mleft.right().node(), lsb),
644              g.TempImmediate(mask_width));
645         return;
646       }
647       // Other cases fall through to the normal And operation.
648     }
649   }
650   VisitLogical<Int32BinopMatcher>(
651       this, node, &m, kArm64And32, CanCover(node, m.left().node()),
652       CanCover(node, m.right().node()), kLogical32Imm);
653 }
654 
655 
656 void InstructionSelector::VisitWord64And(Node* node) {
657   Arm64OperandGenerator g(this);
658   Int64BinopMatcher m(node);
659   if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
660       m.right().HasValue()) {
661     uint64_t mask = m.right().Value();
662     uint64_t mask_width = base::bits::CountPopulation64(mask);
663     uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
664     if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
665       // The mask must be contiguous, and occupy the least-significant bits.
666       DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
667 
668       // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
669       // significant bits.
670       Int64BinopMatcher mleft(m.left().node());
671       if (mleft.right().HasValue()) {
672         // Any shift value can match; int64 shifts use `value % 64`.
673         uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
674 
675         // Ubfx cannot extract bits past the register size, however since
676         // shifting the original value would have introduced some zeros we can
677         // still use ubfx with a smaller mask and the remaining bits will be
678         // zeros.
679         if (lsb + mask_width > 64) mask_width = 64 - lsb;
680 
681         Emit(kArm64Ubfx, g.DefineAsRegister(node),
682              g.UseRegister(mleft.left().node()),
683              g.UseImmediateOrTemp(mleft.right().node(), lsb),
684              g.TempImmediate(static_cast<int32_t>(mask_width)));
685         return;
686       }
687       // Other cases fall through to the normal And operation.
688     }
689   }
690   VisitLogical<Int64BinopMatcher>(
691       this, node, &m, kArm64And, CanCover(node, m.left().node()),
692       CanCover(node, m.right().node()), kLogical64Imm);
693 }
694 
695 
696 void InstructionSelector::VisitWord32Or(Node* node) {
697   Int32BinopMatcher m(node);
698   VisitLogical<Int32BinopMatcher>(
699       this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
700       CanCover(node, m.right().node()), kLogical32Imm);
701 }
702 
703 
704 void InstructionSelector::VisitWord64Or(Node* node) {
705   Int64BinopMatcher m(node);
706   VisitLogical<Int64BinopMatcher>(
707       this, node, &m, kArm64Or, CanCover(node, m.left().node()),
708       CanCover(node, m.right().node()), kLogical64Imm);
709 }
710 
711 
712 void InstructionSelector::VisitWord32Xor(Node* node) {
713   Int32BinopMatcher m(node);
714   VisitLogical<Int32BinopMatcher>(
715       this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
716       CanCover(node, m.right().node()), kLogical32Imm);
717 }
718 
719 
720 void InstructionSelector::VisitWord64Xor(Node* node) {
721   Int64BinopMatcher m(node);
722   VisitLogical<Int64BinopMatcher>(
723       this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
724       CanCover(node, m.right().node()), kLogical64Imm);
725 }
726 
727 
728 void InstructionSelector::VisitWord32Shl(Node* node) {
729   Int32BinopMatcher m(node);
730   if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
731       m.right().IsInRange(1, 31)) {
732     Arm64OperandGenerator g(this);
733     Int32BinopMatcher mleft(m.left().node());
734     if (mleft.right().HasValue()) {
735       uint32_t mask = mleft.right().Value();
736       uint32_t mask_width = base::bits::CountPopulation32(mask);
737       uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
738       if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
739         uint32_t shift = m.right().Value();
740         DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
741         DCHECK_NE(0u, shift);
742 
743         if ((shift + mask_width) >= 32) {
744           // If the mask is contiguous and reaches or extends beyond the top
745           // bit, only the shift is needed.
746           Emit(kArm64Lsl32, g.DefineAsRegister(node),
747                g.UseRegister(mleft.left().node()),
748                g.UseImmediate(m.right().node()));
749           return;
750         } else {
751           // Select Ubfiz for Shl(And(x, mask), imm) where the mask is
752           // contiguous, and the shift immediate non-zero.
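          // For example, Shl(And(x, 0xff), 8) becomes Ubfiz(x, 8, 8): insert
          // the low 8 bits of x at bit position 8.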
753           Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
754                g.UseRegister(mleft.left().node()),
755                g.UseImmediate(m.right().node()), g.TempImmediate(mask_width));
756           return;
757         }
758       }
759     }
760   }
761   VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
762 }
763 
764 
765 void InstructionSelector::VisitWord64Shl(Node* node) {
766   Arm64OperandGenerator g(this);
767   Int64BinopMatcher m(node);
768   if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
769       m.right().IsInRange(32, 63)) {
770     // There's no need to sign/zero-extend to 64-bit if we shift out the upper
771     // 32 bits anyway.
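    // For example, with a shift of 40 only bits 0-23 of the 32-bit input can
    // reach the result, so the extension bits are always discarded.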
772     Emit(kArm64Lsl, g.DefineAsRegister(node),
773          g.UseRegister(m.left().node()->InputAt(0)),
774          g.UseImmediate(m.right().node()));
775     return;
776   }
777   VisitRRO(this, kArm64Lsl, node, kShift64Imm);
778 }
779 
780 
781 namespace {
782 
783 bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
784   Arm64OperandGenerator g(selector);
785   Int32BinopMatcher m(node);
786   if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
787     // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
788     // OP is >>> or >> and (K & 0x1f) != 0.
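    // For example, (x << 24) >> 24 with an arithmetic shift becomes
    // Sbfx(x, 0, 8), a sign extension of the low byte.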
789     Int32BinopMatcher mleft(m.left().node());
790     if (mleft.right().HasValue() && m.right().HasValue() &&
791         (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
792       DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
793       ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
794 
795       int right_val = m.right().Value() & 0x1f;
796       DCHECK_NE(right_val, 0);
797 
798       selector->Emit(opcode, g.DefineAsRegister(node),
799                      g.UseRegister(mleft.left().node()), g.TempImmediate(0),
800                      g.TempImmediate(32 - right_val));
801       return true;
802     }
803   }
804   return false;
805 }
806 
807 }  // namespace
808 
809 
810 void InstructionSelector::VisitWord32Shr(Node* node) {
811   Int32BinopMatcher m(node);
812   if (m.left().IsWord32And() && m.right().HasValue()) {
813     uint32_t lsb = m.right().Value() & 0x1f;
814     Int32BinopMatcher mleft(m.left().node());
815     if (mleft.right().HasValue()) {
816       // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
817       // shifted into the least-significant bits.
818       uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
819       unsigned mask_width = base::bits::CountPopulation32(mask);
820       unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
821       if ((mask_msb + mask_width + lsb) == 32) {
822         Arm64OperandGenerator g(this);
823         DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
824         Emit(kArm64Ubfx32, g.DefineAsRegister(node),
825              g.UseRegister(mleft.left().node()),
826              g.UseImmediateOrTemp(m.right().node(), lsb),
827              g.TempImmediate(mask_width));
828         return;
829       }
830     }
831   } else if (TryEmitBitfieldExtract32(this, node)) {
832     return;
833   }
834 
835   if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
836       CanCover(node, node->InputAt(0))) {
837     // Combine this shift with the multiply and shift that would be generated
838     // by Uint32MulHigh.
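    // Shr(Uint32MulHigh(x, y), k) becomes Umull followed by a single
    // Lsr #(32 + k).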
839     Arm64OperandGenerator g(this);
840     Node* left = m.left().node();
841     int shift = m.right().Value() & 0x1f;
842     InstructionOperand const smull_operand = g.TempRegister();
843     Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
844          g.UseRegister(left->InputAt(1)));
845     Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
846          g.TempImmediate(32 + shift));
847     return;
848   }
849 
850   VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
851 }
852 
853 
854 void InstructionSelector::VisitWord64Shr(Node* node) {
855   Int64BinopMatcher m(node);
856   if (m.left().IsWord64And() && m.right().HasValue()) {
857     uint32_t lsb = m.right().Value() & 0x3f;
858     Int64BinopMatcher mleft(m.left().node());
859     if (mleft.right().HasValue()) {
860       // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
861       // shifted into the least-significant bits.
862       uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
863       unsigned mask_width = base::bits::CountPopulation64(mask);
864       unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
865       if ((mask_msb + mask_width + lsb) == 64) {
866         Arm64OperandGenerator g(this);
867         DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
868         Emit(kArm64Ubfx, g.DefineAsRegister(node),
869              g.UseRegister(mleft.left().node()),
870              g.UseImmediateOrTemp(m.right().node(), lsb),
871              g.TempImmediate(mask_width));
872         return;
873       }
874     }
875   }
876   VisitRRO(this, kArm64Lsr, node, kShift64Imm);
877 }
878 
879 
880 void InstructionSelector::VisitWord32Sar(Node* node) {
881   if (TryEmitBitfieldExtract32(this, node)) {
882     return;
883   }
884 
885   Int32BinopMatcher m(node);
886   if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
887       CanCover(node, node->InputAt(0))) {
888     // Combine this shift with the multiply and shift that would be generated
889     // by Int32MulHigh.
890     Arm64OperandGenerator g(this);
891     Node* left = m.left().node();
892     int shift = m.right().Value() & 0x1f;
893     InstructionOperand const smull_operand = g.TempRegister();
894     Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
895          g.UseRegister(left->InputAt(1)));
896     Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
897          g.TempImmediate(32 + shift));
898     return;
899   }
900 
901   if (m.left().IsInt32Add() && m.right().HasValue() &&
902       CanCover(node, node->InputAt(0))) {
903     Node* add_node = m.left().node();
904     Int32BinopMatcher madd_node(add_node);
905     if (madd_node.left().IsInt32MulHigh() &&
906         CanCover(add_node, madd_node.left().node())) {
907       // Combine the shift that would be generated by Int32MulHigh with the add
908       // on the left of this Sar operation. We do it here, as the result of the
909       // add potentially has 33 bits, so we have to ensure the result is
910       // truncated by being the input to this 32-bit Sar operation.
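      // The result is Smull, an Add using the Operand2_R_ASR_I mode to fold in
      // the >> 32, and a final Asr32 by the original shift amount.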
911       Arm64OperandGenerator g(this);
912       Node* mul_node = madd_node.left().node();
913 
914       InstructionOperand const smull_operand = g.TempRegister();
915       Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
916            g.UseRegister(mul_node->InputAt(1)));
917 
918       InstructionOperand const add_operand = g.TempRegister();
919       Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
920            add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
921            g.TempImmediate(32));
922 
923       Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
924            g.UseImmediate(node->InputAt(1)));
925       return;
926     }
927   }
928 
929   VisitRRO(this, kArm64Asr32, node, kShift32Imm);
930 }
931 
932 
933 void InstructionSelector::VisitWord64Sar(Node* node) {
934   VisitRRO(this, kArm64Asr, node, kShift64Imm);
935 }
936 
937 
938 void InstructionSelector::VisitWord32Ror(Node* node) {
939   VisitRRO(this, kArm64Ror32, node, kShift32Imm);
940 }
941 
942 
943 void InstructionSelector::VisitWord64Ror(Node* node) {
944   VisitRRO(this, kArm64Ror, node, kShift64Imm);
945 }
946 
947 
948 void InstructionSelector::VisitWord64Clz(Node* node) {
949   Arm64OperandGenerator g(this);
950   Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
951 }
952 
953 
954 void InstructionSelector::VisitWord32Clz(Node* node) {
955   Arm64OperandGenerator g(this);
956   Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
957 }
958 
959 
960 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
961 
962 
963 void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
964 
965 
966 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
967 
968 
969 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
970 
971 
972 void InstructionSelector::VisitInt32Add(Node* node) {
973   Arm64OperandGenerator g(this);
974   Int32BinopMatcher m(node);
975   // Select Madd(x, y, z) for Add(Mul(x, y), z).
976   if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
977     Int32BinopMatcher mleft(m.left().node());
978     // Check multiply can't be later reduced to addition with shift.
979     if (LeftShiftForReducedMultiply(&mleft) == 0) {
980       Emit(kArm64Madd32, g.DefineAsRegister(node),
981            g.UseRegister(mleft.left().node()),
982            g.UseRegister(mleft.right().node()),
983            g.UseRegister(m.right().node()));
984       return;
985     }
986   }
987   // Select Madd(x, y, z) for Add(z, Mul(x, y)).
988   if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
989     Int32BinopMatcher mright(m.right().node());
990     // Check multiply can't be later reduced to addition with shift.
991     if (LeftShiftForReducedMultiply(&mright) == 0) {
992       Emit(kArm64Madd32, g.DefineAsRegister(node),
993            g.UseRegister(mright.left().node()),
994            g.UseRegister(mright.right().node()),
995            g.UseRegister(m.left().node()));
996       return;
997     }
998   }
999   VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
1000 }
1001 
1002 
1003 void InstructionSelector::VisitInt64Add(Node* node) {
1004   Arm64OperandGenerator g(this);
1005   Int64BinopMatcher m(node);
1006   // Select Madd(x, y, z) for Add(Mul(x, y), z).
1007   if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
1008     Int64BinopMatcher mleft(m.left().node());
1009     // Check multiply can't be later reduced to addition with shift.
1010     if (LeftShiftForReducedMultiply(&mleft) == 0) {
1011       Emit(kArm64Madd, g.DefineAsRegister(node),
1012            g.UseRegister(mleft.left().node()),
1013            g.UseRegister(mleft.right().node()),
1014            g.UseRegister(m.right().node()));
1015       return;
1016     }
1017   }
1018   // Select Madd(x, y, z) for Add(z, Mul(x, y)).
1019   if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
1020     Int64BinopMatcher mright(m.right().node());
1021     // Check multiply can't be later reduced to addition with shift.
1022     if (LeftShiftForReducedMultiply(&mright) == 0) {
1023       Emit(kArm64Madd, g.DefineAsRegister(node),
1024            g.UseRegister(mright.left().node()),
1025            g.UseRegister(mright.right().node()),
1026            g.UseRegister(m.left().node()));
1027       return;
1028     }
1029   }
1030   VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
1031 }
1032 
1033 
1034 void InstructionSelector::VisitInt32Sub(Node* node) {
1035   Arm64OperandGenerator g(this);
1036   Int32BinopMatcher m(node);
1037 
1038   // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
1039   if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
1040     Int32BinopMatcher mright(m.right().node());
1041     // Check multiply can't be later reduced to addition with shift.
1042     if (LeftShiftForReducedMultiply(&mright) == 0) {
1043       Emit(kArm64Msub32, g.DefineAsRegister(node),
1044            g.UseRegister(mright.left().node()),
1045            g.UseRegister(mright.right().node()),
1046            g.UseRegister(m.left().node()));
1047       return;
1048     }
1049   }
1050 
1051   VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
1052 }
1053 
1054 
1055 void InstructionSelector::VisitInt64Sub(Node* node) {
1056   Arm64OperandGenerator g(this);
1057   Int64BinopMatcher m(node);
1058 
1059   // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
1060   if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
1061     Int64BinopMatcher mright(m.right().node());
1062     // Check multiply can't be later reduced to addition with shift.
1063     if (LeftShiftForReducedMultiply(&mright) == 0) {
1064       Emit(kArm64Msub, g.DefineAsRegister(node),
1065            g.UseRegister(mright.left().node()),
1066            g.UseRegister(mright.right().node()),
1067            g.UseRegister(m.left().node()));
1068       return;
1069     }
1070   }
1071 
1072   VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
1073 }
1074 
1075 
1076 void InstructionSelector::VisitInt32Mul(Node* node) {
1077   Arm64OperandGenerator g(this);
1078   Int32BinopMatcher m(node);
1079 
1080   // First, try to reduce the multiplication to addition with left shift.
1081   // x * (2^k + 1) -> x + (x << k)
1082   int32_t shift = LeftShiftForReducedMultiply(&m);
1083   if (shift > 0) {
1084     Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
1085          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1086          g.UseRegister(m.left().node()), g.TempImmediate(shift));
1087     return;
1088   }
1089 
1090   if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
1091     Int32BinopMatcher mleft(m.left().node());
1092 
1093     // Select Mneg(x, y) for Mul(Sub(0, x), y).
1094     if (mleft.left().Is(0)) {
1095       Emit(kArm64Mneg32, g.DefineAsRegister(node),
1096            g.UseRegister(mleft.right().node()),
1097            g.UseRegister(m.right().node()));
1098       return;
1099     }
1100   }
1101 
1102   if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
1103     Int32BinopMatcher mright(m.right().node());
1104 
1105     // Select Mneg(x, y) for Mul(x, Sub(0, y)).
1106     if (mright.left().Is(0)) {
1107       Emit(kArm64Mneg32, g.DefineAsRegister(node),
1108            g.UseRegister(m.left().node()),
1109            g.UseRegister(mright.right().node()));
1110       return;
1111     }
1112   }
1113 
1114   VisitRRR(this, kArm64Mul32, node);
1115 }
1116 
1117 
1118 void InstructionSelector::VisitInt64Mul(Node* node) {
1119   Arm64OperandGenerator g(this);
1120   Int64BinopMatcher m(node);
1121 
1122   // First, try to reduce the multiplication to addition with left shift.
1123   // x * (2^k + 1) -> x + (x << k)
1124   int32_t shift = LeftShiftForReducedMultiply(&m);
1125   if (shift > 0) {
1126     Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
1127          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1128          g.UseRegister(m.left().node()), g.TempImmediate(shift));
1129     return;
1130   }
1131 
1132   if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
1133     Int64BinopMatcher mleft(m.left().node());
1134 
1135     // Select Mneg(x, y) for Mul(Sub(0, x), y).
1136     if (mleft.left().Is(0)) {
1137       Emit(kArm64Mneg, g.DefineAsRegister(node),
1138            g.UseRegister(mleft.right().node()),
1139            g.UseRegister(m.right().node()));
1140       return;
1141     }
1142   }
1143 
1144   if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
1145     Int64BinopMatcher mright(m.right().node());
1146 
1147     // Select Mneg(x, y) for Mul(x, Sub(0, y)).
1148     if (mright.left().Is(0)) {
1149       Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1150            g.UseRegister(mright.right().node()));
1151       return;
1152     }
1153   }
1154 
1155   VisitRRR(this, kArm64Mul, node);
1156 }
1157 
1158 
1159 void InstructionSelector::VisitInt32MulHigh(Node* node) {
1160   Arm64OperandGenerator g(this);
1161   InstructionOperand const smull_operand = g.TempRegister();
1162   Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
1163        g.UseRegister(node->InputAt(1)));
1164   Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
1165 }
1166 
1167 
1168 void InstructionSelector::VisitUint32MulHigh(Node* node) {
1169   Arm64OperandGenerator g(this);
1170   InstructionOperand const smull_operand = g.TempRegister();
1171   Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
1172        g.UseRegister(node->InputAt(1)));
1173   Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
1174 }
1175 
1176 
1177 void InstructionSelector::VisitInt32Div(Node* node) {
1178   VisitRRR(this, kArm64Idiv32, node);
1179 }
1180 
1181 
1182 void InstructionSelector::VisitInt64Div(Node* node) {
1183   VisitRRR(this, kArm64Idiv, node);
1184 }
1185 
1186 
1187 void InstructionSelector::VisitUint32Div(Node* node) {
1188   VisitRRR(this, kArm64Udiv32, node);
1189 }
1190 
1191 
1192 void InstructionSelector::VisitUint64Div(Node* node) {
1193   VisitRRR(this, kArm64Udiv, node);
1194 }
1195 
1196 
1197 void InstructionSelector::VisitInt32Mod(Node* node) {
1198   VisitRRR(this, kArm64Imod32, node);
1199 }
1200 
1201 
1202 void InstructionSelector::VisitInt64Mod(Node* node) {
1203   VisitRRR(this, kArm64Imod, node);
1204 }
1205 
1206 
1207 void InstructionSelector::VisitUint32Mod(Node* node) {
1208   VisitRRR(this, kArm64Umod32, node);
1209 }
1210 
1211 
1212 void InstructionSelector::VisitUint64Mod(Node* node) {
1213   VisitRRR(this, kArm64Umod, node);
1214 }
1215 
1216 
1217 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
1218   VisitRR(this, kArm64Float32ToFloat64, node);
1219 }
1220 
1221 
1222 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
1223   VisitRR(this, kArm64Int32ToFloat64, node);
1224 }
1225 
1226 
1227 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
1228   VisitRR(this, kArm64Uint32ToFloat64, node);
1229 }
1230 
1231 
1232 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
1233   VisitRR(this, kArm64Float64ToInt32, node);
1234 }
1235 
1236 
1237 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
1238   VisitRR(this, kArm64Float64ToUint32, node);
1239 }
1240 
1241 
1242 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1243   Arm64OperandGenerator g(this);
1244 
1245   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1246   InstructionOperand outputs[2];
1247   size_t output_count = 0;
1248   outputs[output_count++] = g.DefineAsRegister(node);
1249 
1250   Node* success_output = NodeProperties::FindProjection(node, 1);
1251   if (success_output) {
1252     outputs[output_count++] = g.DefineAsRegister(success_output);
1253   }
1254 
1255   Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
1256 }
1257 
1258 
1259 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1260   Arm64OperandGenerator g(this);
1261 
1262   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1263   InstructionOperand outputs[2];
1264   size_t output_count = 0;
1265   outputs[output_count++] = g.DefineAsRegister(node);
1266 
1267   Node* success_output = NodeProperties::FindProjection(node, 1);
1268   if (success_output) {
1269     outputs[output_count++] = g.DefineAsRegister(success_output);
1270   }
1271 
1272   Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
1273 }
1274 
1275 
1276 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1277   Arm64OperandGenerator g(this);
1278 
1279   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1280   InstructionOperand outputs[2];
1281   size_t output_count = 0;
1282   outputs[output_count++] = g.DefineAsRegister(node);
1283 
1284   Node* success_output = NodeProperties::FindProjection(node, 1);
1285   if (success_output) {
1286     outputs[output_count++] = g.DefineAsRegister(success_output);
1287   }
1288 
1289   Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
1290 }
1291 
1292 
1293 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1294   Arm64OperandGenerator g(this);
1295 
1296   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1297   InstructionOperand outputs[2];
1298   size_t output_count = 0;
1299   outputs[output_count++] = g.DefineAsRegister(node);
1300 
1301   Node* success_output = NodeProperties::FindProjection(node, 1);
1302   if (success_output) {
1303     outputs[output_count++] = g.DefineAsRegister(success_output);
1304   }
1305 
1306   Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
1307 }
1308 
1309 
1310 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
1311   VisitRR(this, kArm64Sxtw, node);
1312 }
1313 
1314 
1315 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
1316   Arm64OperandGenerator g(this);
1317   Node* value = node->InputAt(0);
1318   switch (value->opcode()) {
1319     case IrOpcode::kWord32And:
1320     case IrOpcode::kWord32Or:
1321     case IrOpcode::kWord32Xor:
1322     case IrOpcode::kWord32Shl:
1323     case IrOpcode::kWord32Shr:
1324     case IrOpcode::kWord32Sar:
1325     case IrOpcode::kWord32Ror:
1326     case IrOpcode::kWord32Equal:
1327     case IrOpcode::kInt32Add:
1328     case IrOpcode::kInt32AddWithOverflow:
1329     case IrOpcode::kInt32Sub:
1330     case IrOpcode::kInt32SubWithOverflow:
1331     case IrOpcode::kInt32Mul:
1332     case IrOpcode::kInt32MulHigh:
1333     case IrOpcode::kInt32Div:
1334     case IrOpcode::kInt32Mod:
1335     case IrOpcode::kInt32LessThan:
1336     case IrOpcode::kInt32LessThanOrEqual:
1337     case IrOpcode::kUint32Div:
1338     case IrOpcode::kUint32LessThan:
1339     case IrOpcode::kUint32LessThanOrEqual:
1340     case IrOpcode::kUint32Mod:
1341     case IrOpcode::kUint32MulHigh: {
1342       // 32-bit operations will write their result in a W register (implicitly
1343       // clearing the top 32-bit of the corresponding X register) so the
1344       // zero-extension is a no-op.
1345       Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
1346       return;
1347     }
1348     default:
1349       break;
1350   }
1351   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
1352 }
1353 
1354 
1355 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
1356   VisitRR(this, kArm64Float64ToFloat32, node);
1357 }
1358 
1359 
1360 void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
1361   switch (TruncationModeOf(node->op())) {
1362     case TruncationMode::kJavaScript:
1363       return VisitRR(this, kArchTruncateDoubleToI, node);
1364     case TruncationMode::kRoundToZero:
1365       return VisitRR(this, kArm64Float64ToInt32, node);
1366   }
1367   UNREACHABLE();
1368 }
1369 
1370 
1371 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
1372   Arm64OperandGenerator g(this);
1373   Node* value = node->InputAt(0);
1374   if (CanCover(node, value) && value->InputCount() >= 2) {
1375     Int64BinopMatcher m(value);
1376     if ((m.IsWord64Sar() && m.right().HasValue() &&
1377          (m.right().Value() == 32)) ||
1378         (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
1379       Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1380            g.UseImmediate(m.right().node()));
1381       return;
1382     }
1383   }
1384 
1385   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
1386 }
1387 
1388 
1389 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
1390   VisitRR(this, kArm64Int64ToFloat32, node);
1391 }
1392 
1393 
1394 void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
1395   VisitRR(this, kArm64Int64ToFloat64, node);
1396 }
1397 
1398 
1399 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
1400   VisitRR(this, kArm64Uint64ToFloat32, node);
1401 }
1402 
1403 
VisitRoundUint64ToFloat64(Node * node)1404 void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
1405   VisitRR(this, kArm64Uint64ToFloat64, node);
1406 }
1407 
1408 
VisitBitcastFloat32ToInt32(Node * node)1409 void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
1410   VisitRR(this, kArm64Float64ExtractLowWord32, node);
1411 }
1412 
1413 
VisitBitcastFloat64ToInt64(Node * node)1414 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
1415   VisitRR(this, kArm64U64MoveFloat64, node);
1416 }
1417 
1418 
VisitBitcastInt32ToFloat32(Node * node)1419 void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
1420   VisitRR(this, kArm64Float64MoveU64, node);
1421 }
1422 
1423 
VisitBitcastInt64ToFloat64(Node * node)1424 void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
1425   VisitRR(this, kArm64Float64MoveU64, node);
1426 }
1427 
1428 
VisitFloat32Add(Node * node)1429 void InstructionSelector::VisitFloat32Add(Node* node) {
1430   VisitRRR(this, kArm64Float32Add, node);
1431 }
1432 
1433 
VisitFloat64Add(Node * node)1434 void InstructionSelector::VisitFloat64Add(Node* node) {
1435   VisitRRR(this, kArm64Float64Add, node);
1436 }
1437 
1438 
VisitFloat32Sub(Node * node)1439 void InstructionSelector::VisitFloat32Sub(Node* node) {
1440   VisitRRR(this, kArm64Float32Sub, node);
1441 }
1442 
1443 
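// Float64Sub matches two patterns before falling back to a plain subtraction:
//   Float64Sub(-0.0, Float64RoundDown(Float64Sub(-0.0, x)))
//     => kArm64Float64RoundUp(x)  (ceil expressed as negate/floor/negate)
//   Float64Sub(-0.0, x)
//     => kArm64Float64Neg(x)
// Both require that this node covers the intermediate nodes it folds away.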
void InstructionSelector::VisitFloat64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero()) {
    if (m.right().IsFloat64RoundDown() &&
        CanCover(m.node(), m.right().node())) {
      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
          CanCover(m.right().node(), m.right().InputAt(0))) {
        Float64BinopMatcher mright0(m.right().InputAt(0));
        if (mright0.left().IsMinusZero()) {
          Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
               g.UseRegister(mright0.right().node()));
          return;
        }
      }
    }
    Emit(kArm64Float64Neg, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
    return;
  }
  VisitRRR(this, kArm64Float64Sub, node);
}


void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kArm64Float32Mul, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kArm64Float32Div, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kArm64Float64Div, node);
}


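// Float64Mod has no single ARM64 instruction. The inputs and result are pinned
// to the FP argument/result registers d0 and d1 and the instruction is marked
// as a call, so the code generator can lower it as an out-of-line call
// (presumably a C helper such as fmod) without extra register shuffling.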
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}


void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitRRR(this, kArm64Float32Max, node);
}


void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRR(this, kArm64Float64Max, node);
}


void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRR(this, kArm64Float32Min, node);
}


void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRR(this, kArm64Float64Min, node);
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kArm64Float32Abs, node);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kArm64Float64Abs, node);
}


void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kArm64Float32Sqrt, node);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kArm64Float64Sqrt, node);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kArm64Float32RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kArm64Float64RoundDown, node);
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kArm64Float32RoundUp, node);
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kArm64Float64RoundUp, node);
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float32RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesAway, node);
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kArm64Float32RoundTiesEven, node);
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesEven, node);
}


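// Arguments are pushed by first claiming stack space for all of them and then
// poking each one into its slot. When the native stack pointer is used it must
// stay 16-byte aligned, so an odd slot count is rounded up to the next even
// number before the claim.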
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  Arm64OperandGenerator g(this);

  // Push the arguments to the stack.
  int aligned_push_count = static_cast<int>(arguments->size());

  bool pushed_count_uneven = aligned_push_count & 1;
  int claim_count = aligned_push_count;
  if (pushed_count_uneven && descriptor->UseNativeStack()) {
    // We can only claim for an even number of call arguments when we use the
    // native stack.
    claim_count++;
  }
  // TODO(dcarney): claim and poke probably take small immediates,
  //                loop here or whatever.
  // Bump the stack pointer(s).
  if (aligned_push_count > 0) {
    // TODO(dcarney): it would be better to bump the csp here only
    //                and emit paired stores with increment for non c frames.
    Emit(kArm64ClaimForCallArguments, g.NoOutput(),
         g.TempImmediate(claim_count));
  }

  // Move arguments to the stack.
  int slot = aligned_push_count - 1;
  while (slot >= 0) {
    Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
         g.TempImmediate(slot));
    slot--;
    // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
    //              same type.
    // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
    //      g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
    // slot -= 2;
  }
}


bool InstructionSelector::IsTailCallAddressImmediate() { return false; }


namespace {

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
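// An immediate operand is preferred on the right-hand side. If only the left
// operand fits the immediate mode, the operands are swapped; for
// non-commutative compares the continuation's condition is commuted to
// compensate.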
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative, ImmediateMode immediate_mode) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  Int32BinopMatcher m(node);
  ArchOpcode opcode = kArm64Cmp32;

  // Select negated compare for comparisons with negated right input.
  if (m.right().IsInt32Sub()) {
    Node* sub = m.right().node();
    Int32BinopMatcher msub(sub);
    if (msub.left().Is(0)) {
      bool can_cover = selector->CanCover(node, sub);
      node->ReplaceInput(1, msub.right().node());
      // Even if the comparison node covers the subtraction, after the input
      // replacement above, the node still won't cover the input to the
      // subtraction; the subtraction still uses it.
      // In order to get shifted operations to work, we must remove the rhs
      // input to the subtraction, as TryMatchAnyShift requires this node to
      // cover the input shift. We do this by setting it to the lhs input,
      // as we know it's zero, and the result of the subtraction isn't used by
      // any other node.
      if (can_cover) sub->ReplaceInput(1, msub.left().node());
      opcode = kArm64Cmn32;
    }
  }
  VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
}


void VisitWordTest(InstructionSelector* selector, Node* node,
                   InstructionCode opcode, FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
               cont);
}


void VisitWord32Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst32, cont);
}


void VisitWord64Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst, cont);
}


// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float32BinopMatcher m(node);
  if (m.right().Is(0.0f)) {
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0f)) {
    cont->Commute();
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}


// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().Is(0.0)) {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0)) {
    cont->Commute();
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}

}  // namespace


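// Branch selection first strips Word32Equal-against-zero wrappers by inverting
// the branch, then tries to fuse the remaining value with a compare, an
// overflow projection, or a single-bit mask test (tbz/tbnz). If nothing
// matches, a compare-and-branch against zero is emitted.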
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
    Int32BinopMatcher m(value);
    if (m.right().Is(0)) {
      user = value;
      value = m.left().node();
      cont.Negate();
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kFloat32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, &cont);
      case IrOpcode::kFloat32LessThan:
        cont.OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat32Compare(this, value, &cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat32Compare(this, value, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                                     kArithmeticImm, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                                     kArithmeticImm, &cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
                                                     kArithmeticImm, &cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
                                                     kArithmeticImm, &cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Add:
        return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                kArithmeticImm);
      case IrOpcode::kInt32Sub:
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kWord32And: {
        Int32BinopMatcher m(value);
        if (m.right().HasValue() &&
            (base::bits::CountPopulation32(m.right().Value()) == 1)) {
          // If the mask has only one bit set, we can use tbz/tbnz.
          DCHECK((cont.condition() == kEqual) ||
                 (cont.condition() == kNotEqual));
          Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
               g.UseRegister(m.left().node()),
               g.TempImmediate(
                   base::bits::CountTrailingZeros32(m.right().Value())),
               g.Label(cont.true_block()), g.Label(cont.false_block()));
          return;
        }
        return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                kLogical32Imm);
      }
      case IrOpcode::kWord64And: {
        Int64BinopMatcher m(value);
        if (m.right().HasValue() &&
            (base::bits::CountPopulation64(m.right().Value()) == 1)) {
          // If the mask has only one bit set, we can use tbz/tbnz.
          DCHECK((cont.condition() == kEqual) ||
                 (cont.condition() == kNotEqual));
          Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
               g.UseRegister(m.left().node()),
               g.TempImmediate(
                   base::bits::CountTrailingZeros64(m.right().Value())),
               g.Label(cont.true_block()), g.Label(cont.false_block()));
          return;
        }
        return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                kLogical64Imm);
      }
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, compare against 0 and branch.
  Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
       g.UseRegister(value), g.Label(cont.true_block()),
       g.Label(cont.false_block()));
}


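// A jump table is emitted when there is at least one case, the minimum case
// value does not underflow int32, and the table's weighted cost
// (space: 4 + value_range, time: 3) is no worse than a compare chain's
// (space: 3 + 2 * case_count, time: case_count), with time weighted 3x.
// A non-zero minimum is subtracted out first so the table can be zero-based.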
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Arm64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (sw.min_value) {
      index_operand = g.TempRegister();
      Emit(kArm64Sub32, index_operand, value_operand,
           g.TempImmediate(sw.min_value));
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Add:
          return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                  kArithmeticImm);
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                  kArithmeticImm);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                  kLogical32Imm);
        case IrOpcode::kWord32Equal: {
          // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
          Int32BinopMatcher mequal(value);
          node->ReplaceInput(0, mequal.left().node());
          node->ReplaceInput(1, mequal.right().node());
          cont.Negate();
          return VisitWord32Compare(this, node, &cont);
        }
        default:
          break;
      }
      return VisitWord32Test(this, value, &cont);
    }
  }
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                  kLogical64Imm);
        default:
          break;
      }
      return VisitWord64Test(this, value, &cont);
    }
  }
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


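// When a Float64InsertLowWord32 consumes a covered Float64InsertHighWord32
// (or vice versa in the next visitor), both 32-bit halves are available as
// general-purpose values, so the pair is combined into a single kArm64Bfi that
// builds the 64-bit bit pattern, followed by one kArm64Float64MoveU64 into the
// FP result register.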
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
         g.UseRegister(right_of_left), g.TempImmediate(32),
         g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
    return;
  }
  Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
         g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
    return;
  }
  Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}


// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32Max |
         MachineOperatorBuilder::kFloat32Min |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64Max |
         MachineOperatorBuilder::kFloat64Min |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8