1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/instruction-selector-impl.h"
6 #include "src/compiler/node-matchers.h"
7 #include "src/compiler/node-properties.h"
8 
9 namespace v8 {
10 namespace internal {
11 namespace compiler {
12 
13 enum ImmediateMode {
14   kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
15   kShift32Imm,     // 0 - 31
16   kShift64Imm,     // 0 - 63
17   kLogical32Imm,   // 32 bit value encodable as a logical (bitmask) immediate
18   kLogical64Imm,   // 64 bit value encodable as a logical (bitmask) immediate
19   kLoadStoreImm8,   // signed 9 bit unscaled or 12 bit unsigned scaled by access size
20   kLoadStoreImm16,
21   kLoadStoreImm32,
22   kLoadStoreImm64,
23   kNoImmediate
24 };
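// For example, kArithmeticImm accepts values such as 0x123 or 0x123000 (a 12 bit
// value optionally shifted left by 12), while kShift32Imm/kShift64Imm always
// match because only the low 5 or 6 bits of the shift amount are observed.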
25 
26 
27 // Adds Arm64-specific methods for generating operands.
28 class Arm64OperandGenerator final : public OperandGenerator {
29  public:
30   explicit Arm64OperandGenerator(InstructionSelector* selector)
31       : OperandGenerator(selector) {}
32 
33   InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
34     if (CanBeImmediate(node, mode)) {
35       return UseImmediate(node);
36     }
37     return UseRegister(node);
38   }
39 
40   // Use the zero register if the node has the immediate value zero, otherwise
41   // assign a register.
42   InstructionOperand UseRegisterOrImmediateZero(Node* node) {
43     if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
44         (IsFloatConstant(node) &&
45          (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
46       return UseImmediate(node);
47     }
48     return UseRegister(node);
49   }
50 
51   // Use the provided node if it has the required value, or create a
52   // TempImmediate otherwise.
53   InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
54     if (GetIntegerConstantValue(node) == value) {
55       return UseImmediate(node);
56     }
57     return TempImmediate(value);
58   }
59 
60   bool IsIntegerConstant(Node* node) {
61     return (node->opcode() == IrOpcode::kInt32Constant) ||
62            (node->opcode() == IrOpcode::kInt64Constant);
63   }
64 
65   int64_t GetIntegerConstantValue(Node* node) {
66     if (node->opcode() == IrOpcode::kInt32Constant) {
67       return OpParameter<int32_t>(node);
68     }
69     DCHECK(node->opcode() == IrOpcode::kInt64Constant);
70     return OpParameter<int64_t>(node);
71   }
72 
73   bool IsFloatConstant(Node* node) {
74     return (node->opcode() == IrOpcode::kFloat32Constant) ||
75            (node->opcode() == IrOpcode::kFloat64Constant);
76   }
77 
78   double GetFloatConstantValue(Node* node) {
79     if (node->opcode() == IrOpcode::kFloat32Constant) {
80       return OpParameter<float>(node);
81     }
82     DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
83     return OpParameter<double>(node);
84   }
85 
86   bool CanBeImmediate(Node* node, ImmediateMode mode) {
87     return IsIntegerConstant(node) &&
88            CanBeImmediate(GetIntegerConstantValue(node), mode);
89   }
90 
91   bool CanBeImmediate(int64_t value, ImmediateMode mode) {
92     unsigned ignored;
93     switch (mode) {
94       case kLogical32Imm:
95         // TODO(dcarney): some unencodable values can be handled by
96         // switching instructions.
97         return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
98                                        &ignored, &ignored, &ignored);
99       case kLogical64Imm:
100         return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
101                                        &ignored, &ignored, &ignored);
102       case kArithmeticImm:
103         return Assembler::IsImmAddSub(value);
104       case kLoadStoreImm8:
105         return IsLoadStoreImmediate(value, LSByte);
106       case kLoadStoreImm16:
107         return IsLoadStoreImmediate(value, LSHalfword);
108       case kLoadStoreImm32:
109         return IsLoadStoreImmediate(value, LSWord);
110       case kLoadStoreImm64:
111         return IsLoadStoreImmediate(value, LSDoubleWord);
112       case kNoImmediate:
113         return false;
114       case kShift32Imm:  // Fall through.
115       case kShift64Imm:
116         // Shift operations only observe the bottom 5 or 6 bits of the value.
117         // All possible shifts can be encoded by discarding bits which have no
118         // effect.
119         return true;
120     }
121     return false;
122   }
123 
124   bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
125     // TODO(arm64): Load and Store on 128 bit Q registers is not supported yet.
126     DCHECK_NE(MachineRepresentation::kSimd128, rep);
127     return IsIntegerConstant(node) &&
128            (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
129   }
130 
131  private:
132   bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
133     return Assembler::IsImmLSScaled(value, size) ||
134            Assembler::IsImmLSUnscaled(value);
135   }
136 };
137 
138 
139 namespace {
140 
141 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
142   Arm64OperandGenerator g(selector);
143   selector->Emit(opcode, g.DefineAsRegister(node),
144                  g.UseRegister(node->InputAt(0)));
145 }
146 
147 
148 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
149   Arm64OperandGenerator g(selector);
150   selector->Emit(opcode, g.DefineAsRegister(node),
151                  g.UseRegister(node->InputAt(0)),
152                  g.UseRegister(node->InputAt(1)));
153 }
154 
155 
156 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
157               ImmediateMode operand_mode) {
158   Arm64OperandGenerator g(selector);
159   selector->Emit(opcode, g.DefineAsRegister(node),
160                  g.UseRegister(node->InputAt(0)),
161                  g.UseOperand(node->InputAt(1), operand_mode));
162 }
163 
164 struct ExtendingLoadMatcher {
165   ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
166       : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
167     Initialize(node);
168   }
169 
170   bool Matches() const { return matches_; }
171 
172   Node* base() const {
173     DCHECK(Matches());
174     return base_;
175   }
176   int64_t immediate() const {
177     DCHECK(Matches());
178     return immediate_;
179   }
180   ArchOpcode opcode() const {
181     DCHECK(Matches());
182     return opcode_;
183   }
184 
185  private:
186   bool matches_;
187   InstructionSelector* selector_;
188   Node* base_;
189   int64_t immediate_;
190   ArchOpcode opcode_;
191 
192   void Initialize(Node* node) {
193     Int64BinopMatcher m(node);
194     // When loading a 64-bit value and shifting by 32, we should
195     // just load and sign-extend the interesting 4 bytes instead.
196     // This happens, for example, when we're loading and untagging SMIs.
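    // (On little-endian ARM64 the upper half of a 64-bit value lives at
    // offset +4, which is why the immediate is adjusted by 4 below.)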
197     DCHECK(m.IsWord64Sar());
198     if (m.left().IsLoad() && m.right().Is(32) &&
199         selector_->CanCover(m.node(), m.left().node())) {
200       Arm64OperandGenerator g(selector_);
201       Node* load = m.left().node();
202       Node* offset = load->InputAt(1);
203       base_ = load->InputAt(0);
204       opcode_ = kArm64Ldrsw;
205       if (g.IsIntegerConstant(offset)) {
206         immediate_ = g.GetIntegerConstantValue(offset) + 4;
207         matches_ = g.CanBeImmediate(immediate_, kLoadStoreImm32);
208       }
209     }
210   }
211 };
212 
213 bool TryMatchExtendingLoad(InstructionSelector* selector, Node* node) {
214   ExtendingLoadMatcher m(node, selector);
215   return m.Matches();
216 }
217 
218 bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
219   ExtendingLoadMatcher m(node, selector);
220   Arm64OperandGenerator g(selector);
221   if (m.Matches()) {
222     InstructionOperand inputs[2];
223     inputs[0] = g.UseRegister(m.base());
224     InstructionCode opcode =
225         m.opcode() | AddressingModeField::encode(kMode_MRI);
226     DCHECK(is_int32(m.immediate()));
227     inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
228     InstructionOperand outputs[] = {g.DefineAsRegister(node)};
229     selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
230                    inputs);
231     return true;
232   }
233   return false;
234 }
235 
236 bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
237                       Node* input_node, InstructionCode* opcode, bool try_ror) {
238   Arm64OperandGenerator g(selector);
239 
240   if (!selector->CanCover(node, input_node)) return false;
241   if (input_node->InputCount() != 2) return false;
242   if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;
243 
244   switch (input_node->opcode()) {
245     case IrOpcode::kWord32Shl:
246     case IrOpcode::kWord64Shl:
247       *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
248       return true;
249     case IrOpcode::kWord32Shr:
250     case IrOpcode::kWord64Shr:
251       *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
252       return true;
253     case IrOpcode::kWord32Sar:
254       *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
255       return true;
256     case IrOpcode::kWord64Sar:
257       if (TryMatchExtendingLoad(selector, input_node)) return false;
258       *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
259       return true;
260     case IrOpcode::kWord32Ror:
261     case IrOpcode::kWord64Ror:
262       if (try_ror) {
263         *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
264         return true;
265       }
266       return false;
267     default:
268       return false;
269   }
270 }
271 
272 
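// Try to match a zero- or sign-extension of the right operand: And(x, 0xff) or
// And(x, 0xffff) becomes an UXTB/UXTH operand, and Sar(Shl(x, 24), 24) or
// Sar(Shl(x, 16), 16) becomes an SXTB/SXTH operand.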
273 bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
274                        Node* node, Node* left_node, Node* right_node,
275                        InstructionOperand* left_op,
276                        InstructionOperand* right_op, InstructionCode* opcode) {
277   if (!selector->CanCover(node, right_node)) return false;
278 
279   NodeMatcher nm(right_node);
280 
281   if (nm.IsWord32And()) {
282     Int32BinopMatcher mright(right_node);
283     if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
284       int32_t mask = mright.right().Value();
285       *left_op = g->UseRegister(left_node);
286       *right_op = g->UseRegister(mright.left().node());
287       *opcode |= AddressingModeField::encode(
288           (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
289       return true;
290     }
291   } else if (nm.IsWord32Sar()) {
292     Int32BinopMatcher mright(right_node);
293     if (selector->CanCover(mright.node(), mright.left().node()) &&
294         mright.left().IsWord32Shl()) {
295       Int32BinopMatcher mleft_of_right(mright.left().node());
296       if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
297           (mright.right().Is(24) && mleft_of_right.right().Is(24))) {
298         int32_t shift = mright.right().Value();
299         *left_op = g->UseRegister(left_node);
300         *right_op = g->UseRegister(mleft_of_right.left().node());
301         *opcode |= AddressingModeField::encode(
302             (shift == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH);
303         return true;
304       }
305     }
306   }
307   return false;
308 }
309 
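// Match an index of the form (x << k), where k is the log2 of the access size,
// so the load/store can use a scaled register-offset addressing mode.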
310 bool TryMatchLoadStoreShift(Arm64OperandGenerator* g,
311                             InstructionSelector* selector,
312                             MachineRepresentation rep, Node* node, Node* index,
313                             InstructionOperand* index_op,
314                             InstructionOperand* shift_immediate_op) {
315   if (!selector->CanCover(node, index)) return false;
316   if (index->InputCount() != 2) return false;
317   Node* left = index->InputAt(0);
318   Node* right = index->InputAt(1);
319   switch (index->opcode()) {
320     case IrOpcode::kWord32Shl:
321     case IrOpcode::kWord64Shl:
322       if (!g->CanBeLoadStoreShiftImmediate(right, rep)) {
323         return false;
324       }
325       *index_op = g->UseRegister(left);
326       *shift_immediate_op = g->UseImmediate(right);
327       return true;
328     default:
329       return false;
330   }
331 }
332 
333 // Bitfields describing binary operator properties:
334 // CanCommuteField is true if we can switch the two operands, potentially
335 // requiring commuting the flags continuation condition.
336 typedef BitField8<bool, 1, 1> CanCommuteField;
337 // MustCommuteCondField is true when we need to commute the flags continuation
338 // condition in order to switch the operands.
339 typedef BitField8<bool, 2, 1> MustCommuteCondField;
340 // IsComparisonField is true when the operation is a comparison and has no
341 // result other than the condition.
342 typedef BitField8<bool, 3, 1> IsComparisonField;
343 // IsAddSubField is true when an instruction is encoded as ADD or SUB.
344 typedef BitField8<bool, 4, 1> IsAddSubField;
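// For example, kArm64Cmp32 sets all four fields, whereas kArm64Sub32 sets only
// IsAddSubField (see GetBinopProperties below).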
345 
346 // Get properties of a binary operator.
347 uint8_t GetBinopProperties(InstructionCode opcode) {
348   uint8_t result = 0;
349   switch (opcode) {
350     case kArm64Cmp32:
351     case kArm64Cmp:
352       // We can commute CMP by switching the inputs and commuting
353       // the flags continuation.
354       result = CanCommuteField::update(result, true);
355       result = MustCommuteCondField::update(result, true);
356       result = IsComparisonField::update(result, true);
357       // The CMP and CMN instructions are encoded as SUB or ADD
358       // with zero output register, and therefore support the same
359       // operand modes.
360       result = IsAddSubField::update(result, true);
361       break;
362     case kArm64Cmn32:
363     case kArm64Cmn:
364       result = CanCommuteField::update(result, true);
365       result = IsComparisonField::update(result, true);
366       result = IsAddSubField::update(result, true);
367       break;
368     case kArm64Add32:
369     case kArm64Add:
370       result = CanCommuteField::update(result, true);
371       result = IsAddSubField::update(result, true);
372       break;
373     case kArm64Sub32:
374     case kArm64Sub:
375       result = IsAddSubField::update(result, true);
376       break;
377     case kArm64Tst32:
378     case kArm64Tst:
379       result = CanCommuteField::update(result, true);
380       result = IsComparisonField::update(result, true);
381       break;
382     case kArm64And32:
383     case kArm64And:
384     case kArm64Or32:
385     case kArm64Or:
386     case kArm64Eor32:
387     case kArm64Eor:
388       result = CanCommuteField::update(result, true);
389       break;
390     default:
391       UNREACHABLE();
392       return 0;
393   }
394   DCHECK_IMPLIES(MustCommuteCondField::decode(result),
395                  CanCommuteField::decode(result));
396   return result;
397 }
398 
399 // Shared routine for multiple binary operations.
400 template <typename Matcher>
401 void VisitBinop(InstructionSelector* selector, Node* node,
402                 InstructionCode opcode, ImmediateMode operand_mode,
403                 FlagsContinuation* cont) {
404   Arm64OperandGenerator g(selector);
405   InstructionOperand inputs[5];
406   size_t input_count = 0;
407   InstructionOperand outputs[2];
408   size_t output_count = 0;
409 
410   Node* left_node = node->InputAt(0);
411   Node* right_node = node->InputAt(1);
412 
413   uint8_t properties = GetBinopProperties(opcode);
414   bool can_commute = CanCommuteField::decode(properties);
415   bool must_commute_cond = MustCommuteCondField::decode(properties);
416   bool is_add_sub = IsAddSubField::decode(properties);
417 
418   if (g.CanBeImmediate(right_node, operand_mode)) {
419     inputs[input_count++] = g.UseRegister(left_node);
420     inputs[input_count++] = g.UseImmediate(right_node);
421   } else if (can_commute && g.CanBeImmediate(left_node, operand_mode)) {
422     if (must_commute_cond) cont->Commute();
423     inputs[input_count++] = g.UseRegister(right_node);
424     inputs[input_count++] = g.UseImmediate(left_node);
425   } else if (is_add_sub &&
426              TryMatchAnyExtend(&g, selector, node, left_node, right_node,
427                                &inputs[0], &inputs[1], &opcode)) {
428     input_count += 2;
429   } else if (is_add_sub && can_commute &&
430              TryMatchAnyExtend(&g, selector, node, right_node, left_node,
431                                &inputs[0], &inputs[1], &opcode)) {
432     if (must_commute_cond) cont->Commute();
433     input_count += 2;
434   } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
435                               !is_add_sub)) {
436     Matcher m_shift(right_node);
437     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
438     inputs[input_count++] = g.UseRegister(m_shift.left().node());
439     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
440   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
441                                              !is_add_sub)) {
442     if (must_commute_cond) cont->Commute();
443     Matcher m_shift(left_node);
444     inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
445     inputs[input_count++] = g.UseRegister(m_shift.left().node());
446     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
447   } else {
448     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
449     inputs[input_count++] = g.UseRegister(right_node);
450   }
451 
452   if (cont->IsBranch()) {
453     inputs[input_count++] = g.Label(cont->true_block());
454     inputs[input_count++] = g.Label(cont->false_block());
455   }
456 
457   if (!IsComparisonField::decode(properties)) {
458     outputs[output_count++] = g.DefineAsRegister(node);
459   }
460 
461   if (cont->IsSet()) {
462     outputs[output_count++] = g.DefineAsRegister(cont->result());
463   }
464 
465   DCHECK_NE(0u, input_count);
466   DCHECK((output_count != 0) || IsComparisonField::decode(properties));
467   DCHECK_GE(arraysize(inputs), input_count);
468   DCHECK_GE(arraysize(outputs), output_count);
469 
470   opcode = cont->Encode(opcode);
471   if (cont->IsDeoptimize()) {
472     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
473                              cont->reason(), cont->frame_state());
474   } else {
475     selector->Emit(opcode, output_count, outputs, input_count, inputs);
476   }
477 }
478 
479 
480 // Shared routine for multiple binary operations.
481 template <typename Matcher>
482 void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
483                 ImmediateMode operand_mode) {
484   FlagsContinuation cont;
485   VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
486 }
487 
488 
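// Emit an ADD or SUB, switching to the negated opcode when the right-hand
// immediate is negative but its negation is encodable; e.g. Add(x, -1) is
// emitted as Sub(x, 1).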
489 template <typename Matcher>
490 void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
491                  ArchOpcode negate_opcode) {
492   Arm64OperandGenerator g(selector);
493   Matcher m(node);
494   if (m.right().HasValue() && (m.right().Value() < 0) &&
495       g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
496     selector->Emit(negate_opcode, g.DefineAsRegister(node),
497                    g.UseRegister(m.left().node()),
498                    g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
499   } else {
500     VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
501   }
502 }
503 
504 
505 // For multiplications by immediate of the form x * (2^k + 1), where k > 0,
506 // return the value of k, otherwise return zero. This is used to reduce the
507 // multiplication to addition with left shift: x + (x << k).
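// For example, x * 9 matches with k == 3 (9 == 2^3 + 1) and becomes x + (x << 3).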
508 template <typename Matcher>
509 int32_t LeftShiftForReducedMultiply(Matcher* m) {
510   DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
511   if (m->right().HasValue() && m->right().Value() >= 3) {
512     uint64_t value_minus_one = m->right().Value() - 1;
513     if (base::bits::IsPowerOfTwo64(value_minus_one)) {
514       return WhichPowerOf2_64(value_minus_one);
515     }
516   }
517   return 0;
518 }
519 
520 }  // namespace
521 
522 void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
523               ImmediateMode immediate_mode, MachineRepresentation rep,
524               Node* output = nullptr) {
525   Arm64OperandGenerator g(selector);
526   Node* base = node->InputAt(0);
527   Node* index = node->InputAt(1);
528   InstructionOperand inputs[3];
529   size_t input_count = 0;
530   InstructionOperand outputs[1];
531 
532   // If output is not nullptr, use that as the output register. This
533   // is used when we merge a conversion into the load.
534   outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
535   inputs[0] = g.UseRegister(base);
536 
537   if (g.CanBeImmediate(index, immediate_mode)) {
538     input_count = 2;
539     inputs[1] = g.UseImmediate(index);
540     opcode |= AddressingModeField::encode(kMode_MRI);
541   } else if (TryMatchLoadStoreShift(&g, selector, rep, node, index, &inputs[1],
542                                     &inputs[2])) {
543     input_count = 3;
544     opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
545   } else {
546     input_count = 2;
547     inputs[1] = g.UseRegister(index);
548     opcode |= AddressingModeField::encode(kMode_MRR);
549   }
550 
551   selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
552 }
553 
554 void InstructionSelector::VisitLoad(Node* node) {
555   InstructionCode opcode = kArchNop;
556   ImmediateMode immediate_mode = kNoImmediate;
557   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
558   MachineRepresentation rep = load_rep.representation();
559   switch (rep) {
560     case MachineRepresentation::kFloat32:
561       opcode = kArm64LdrS;
562       immediate_mode = kLoadStoreImm32;
563       break;
564     case MachineRepresentation::kFloat64:
565       opcode = kArm64LdrD;
566       immediate_mode = kLoadStoreImm64;
567       break;
568     case MachineRepresentation::kBit:  // Fall through.
569     case MachineRepresentation::kWord8:
570       opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
571       immediate_mode = kLoadStoreImm8;
572       break;
573     case MachineRepresentation::kWord16:
574       opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
575       immediate_mode = kLoadStoreImm16;
576       break;
577     case MachineRepresentation::kWord32:
578       opcode = kArm64LdrW;
579       immediate_mode = kLoadStoreImm32;
580       break;
581     case MachineRepresentation::kTaggedSigned:   // Fall through.
582     case MachineRepresentation::kTaggedPointer:  // Fall through.
583     case MachineRepresentation::kTagged:  // Fall through.
584     case MachineRepresentation::kWord64:
585       opcode = kArm64Ldr;
586       immediate_mode = kLoadStoreImm64;
587       break;
588     case MachineRepresentation::kSimd128:  // Fall through.
589     case MachineRepresentation::kNone:
590       UNREACHABLE();
591       return;
592   }
593   EmitLoad(this, node, opcode, immediate_mode, rep);
594 }
595 
596 void InstructionSelector::VisitProtectedLoad(Node* node) {
597   // TODO(eholk)
598   UNIMPLEMENTED();
599 }
600 
601 void InstructionSelector::VisitStore(Node* node) {
602   Arm64OperandGenerator g(this);
603   Node* base = node->InputAt(0);
604   Node* index = node->InputAt(1);
605   Node* value = node->InputAt(2);
606 
607   StoreRepresentation store_rep = StoreRepresentationOf(node->op());
608   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
609   MachineRepresentation rep = store_rep.representation();
610 
611   // TODO(arm64): I guess this could be done in a better way.
612   if (write_barrier_kind != kNoWriteBarrier) {
613     DCHECK(CanBeTaggedPointer(rep));
614     AddressingMode addressing_mode;
615     InstructionOperand inputs[3];
616     size_t input_count = 0;
617     inputs[input_count++] = g.UseUniqueRegister(base);
618     // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
619     // must check kArithmeticImm as well as kLoadStoreImm64.
620     if (g.CanBeImmediate(index, kArithmeticImm) &&
621         g.CanBeImmediate(index, kLoadStoreImm64)) {
622       inputs[input_count++] = g.UseImmediate(index);
623       addressing_mode = kMode_MRI;
624     } else {
625       inputs[input_count++] = g.UseUniqueRegister(index);
626       addressing_mode = kMode_MRR;
627     }
628     inputs[input_count++] = g.UseUniqueRegister(value);
629     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
630     switch (write_barrier_kind) {
631       case kNoWriteBarrier:
632         UNREACHABLE();
633         break;
634       case kMapWriteBarrier:
635         record_write_mode = RecordWriteMode::kValueIsMap;
636         break;
637       case kPointerWriteBarrier:
638         record_write_mode = RecordWriteMode::kValueIsPointer;
639         break;
640       case kFullWriteBarrier:
641         record_write_mode = RecordWriteMode::kValueIsAny;
642         break;
643     }
644     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
645     size_t const temp_count = arraysize(temps);
646     InstructionCode code = kArchStoreWithWriteBarrier;
647     code |= AddressingModeField::encode(addressing_mode);
648     code |= MiscField::encode(static_cast<int>(record_write_mode));
649     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
650   } else {
651     InstructionOperand inputs[4];
652     size_t input_count = 0;
653     InstructionCode opcode = kArchNop;
654     ImmediateMode immediate_mode = kNoImmediate;
655     switch (rep) {
656       case MachineRepresentation::kFloat32:
657         opcode = kArm64StrS;
658         immediate_mode = kLoadStoreImm32;
659         break;
660       case MachineRepresentation::kFloat64:
661         opcode = kArm64StrD;
662         immediate_mode = kLoadStoreImm64;
663         break;
664       case MachineRepresentation::kBit:  // Fall through.
665       case MachineRepresentation::kWord8:
666         opcode = kArm64Strb;
667         immediate_mode = kLoadStoreImm8;
668         break;
669       case MachineRepresentation::kWord16:
670         opcode = kArm64Strh;
671         immediate_mode = kLoadStoreImm16;
672         break;
673       case MachineRepresentation::kWord32:
674         opcode = kArm64StrW;
675         immediate_mode = kLoadStoreImm32;
676         break;
677       case MachineRepresentation::kTaggedSigned:   // Fall through.
678       case MachineRepresentation::kTaggedPointer:  // Fall through.
679       case MachineRepresentation::kTagged:  // Fall through.
680       case MachineRepresentation::kWord64:
681         opcode = kArm64Str;
682         immediate_mode = kLoadStoreImm64;
683         break;
684       case MachineRepresentation::kSimd128:  // Fall through.
685       case MachineRepresentation::kNone:
686         UNREACHABLE();
687         return;
688     }
689 
690     inputs[0] = g.UseRegisterOrImmediateZero(value);
691     inputs[1] = g.UseRegister(base);
692 
693     if (g.CanBeImmediate(index, immediate_mode)) {
694       input_count = 3;
695       inputs[2] = g.UseImmediate(index);
696       opcode |= AddressingModeField::encode(kMode_MRI);
697     } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[2],
698                                       &inputs[3])) {
699       input_count = 4;
700       opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
701     } else {
702       input_count = 3;
703       inputs[2] = g.UseRegister(index);
704       opcode |= AddressingModeField::encode(kMode_MRR);
705     }
706 
707     Emit(opcode, 0, nullptr, input_count, inputs);
708   }
709 }
710 
711 // Architecture supports unaligned access, therefore VisitLoad is used instead
712 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
713 
714 // Architecture supports unaligned access, therefore VisitStore is used instead
715 void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
716 
717 void InstructionSelector::VisitCheckedLoad(Node* node) {
718   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
719   Arm64OperandGenerator g(this);
720   Node* const buffer = node->InputAt(0);
721   Node* const offset = node->InputAt(1);
722   Node* const length = node->InputAt(2);
723   ArchOpcode opcode = kArchNop;
724   switch (load_rep.representation()) {
725     case MachineRepresentation::kWord8:
726       opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
727       break;
728     case MachineRepresentation::kWord16:
729       opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
730       break;
731     case MachineRepresentation::kWord32:
732       opcode = kCheckedLoadWord32;
733       break;
734     case MachineRepresentation::kWord64:
735       opcode = kCheckedLoadWord64;
736       break;
737     case MachineRepresentation::kFloat32:
738       opcode = kCheckedLoadFloat32;
739       break;
740     case MachineRepresentation::kFloat64:
741       opcode = kCheckedLoadFloat64;
742       break;
743     case MachineRepresentation::kBit:      // Fall through.
744     case MachineRepresentation::kTaggedSigned:   // Fall through.
745     case MachineRepresentation::kTaggedPointer:  // Fall through.
746     case MachineRepresentation::kTagged:   // Fall through.
747     case MachineRepresentation::kSimd128:  // Fall through.
748     case MachineRepresentation::kNone:
749       UNREACHABLE();
750       return;
751   }
752   // If the length is a constant power of two, allow the code generator to
753   // pick a more efficient bounds check sequence by passing the length as an
754   // immediate.
755   if (length->opcode() == IrOpcode::kInt32Constant) {
756     Int32Matcher m(length);
757     if (m.IsPowerOf2()) {
758       Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
759            g.UseRegister(offset), g.UseImmediate(length));
760       return;
761     }
762   }
763   Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
764        g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
765 }
766 
767 
768 void InstructionSelector::VisitCheckedStore(Node* node) {
769   MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
770   Arm64OperandGenerator g(this);
771   Node* const buffer = node->InputAt(0);
772   Node* const offset = node->InputAt(1);
773   Node* const length = node->InputAt(2);
774   Node* const value = node->InputAt(3);
775   ArchOpcode opcode = kArchNop;
776   switch (rep) {
777     case MachineRepresentation::kWord8:
778       opcode = kCheckedStoreWord8;
779       break;
780     case MachineRepresentation::kWord16:
781       opcode = kCheckedStoreWord16;
782       break;
783     case MachineRepresentation::kWord32:
784       opcode = kCheckedStoreWord32;
785       break;
786     case MachineRepresentation::kWord64:
787       opcode = kCheckedStoreWord64;
788       break;
789     case MachineRepresentation::kFloat32:
790       opcode = kCheckedStoreFloat32;
791       break;
792     case MachineRepresentation::kFloat64:
793       opcode = kCheckedStoreFloat64;
794       break;
795     case MachineRepresentation::kBit:      // Fall through.
796     case MachineRepresentation::kTaggedSigned:   // Fall through.
797     case MachineRepresentation::kTaggedPointer:  // Fall through.
798     case MachineRepresentation::kTagged:   // Fall through.
799     case MachineRepresentation::kSimd128:  // Fall through.
800     case MachineRepresentation::kNone:
801       UNREACHABLE();
802       return;
803   }
804   // If the length is a constant power of two, allow the code generator to
805   // pick a more efficient bounds check sequence by passing the length as an
806   // immediate.
807   if (length->opcode() == IrOpcode::kInt32Constant) {
808     Int32Matcher m(length);
809     if (m.IsPowerOf2()) {
810       Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
811            g.UseImmediate(length), g.UseRegisterOrImmediateZero(value));
812       return;
813     }
814   }
815   Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
816        g.UseOperand(length, kArithmeticImm),
817        g.UseRegisterOrImmediateZero(value));
818 }
819 
820 
821 template <typename Matcher>
822 static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
823                          ArchOpcode opcode, bool left_can_cover,
824                          bool right_can_cover, ImmediateMode imm_mode) {
825   Arm64OperandGenerator g(selector);
826 
827   // Map instruction to equivalent operation with inverted right input.
828   ArchOpcode inv_opcode = opcode;
829   switch (opcode) {
830     case kArm64And32:
831       inv_opcode = kArm64Bic32;
832       break;
833     case kArm64And:
834       inv_opcode = kArm64Bic;
835       break;
836     case kArm64Or32:
837       inv_opcode = kArm64Orn32;
838       break;
839     case kArm64Or:
840       inv_opcode = kArm64Orn;
841       break;
842     case kArm64Eor32:
843       inv_opcode = kArm64Eon32;
844       break;
845     case kArm64Eor:
846       inv_opcode = kArm64Eon;
847       break;
848     default:
849       UNREACHABLE();
850   }
851 
852   // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
853   if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
854     Matcher mleft(m->left().node());
855     if (mleft.right().Is(-1)) {
856       // TODO(all): support shifted operand on right.
857       selector->Emit(inv_opcode, g.DefineAsRegister(node),
858                      g.UseRegister(m->right().node()),
859                      g.UseRegister(mleft.left().node()));
860       return;
861     }
862   }
863 
864   // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
865   if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
866       right_can_cover) {
867     Matcher mright(m->right().node());
868     if (mright.right().Is(-1)) {
869       // TODO(all): support shifted operand on right.
870       selector->Emit(inv_opcode, g.DefineAsRegister(node),
871                      g.UseRegister(m->left().node()),
872                      g.UseRegister(mright.left().node()));
873       return;
874     }
875   }
876 
877   if (m->IsWord32Xor() && m->right().Is(-1)) {
878     selector->Emit(kArm64Not32, g.DefineAsRegister(node),
879                    g.UseRegister(m->left().node()));
880   } else if (m->IsWord64Xor() && m->right().Is(-1)) {
881     selector->Emit(kArm64Not, g.DefineAsRegister(node),
882                    g.UseRegister(m->left().node()));
883   } else {
884     VisitBinop<Matcher>(selector, node, opcode, imm_mode);
885   }
886 }
887 
888 
889 void InstructionSelector::VisitWord32And(Node* node) {
890   Arm64OperandGenerator g(this);
891   Int32BinopMatcher m(node);
892   if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
893       m.right().HasValue()) {
894     uint32_t mask = m.right().Value();
895     uint32_t mask_width = base::bits::CountPopulation32(mask);
896     uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
897     if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
898       // The mask must be contiguous, and occupy the least-significant bits.
899       DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
900 
901       // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
902       // significant bits.
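      // For example, And(Shr(x, 8), 0xff) becomes Ubfx32(x, 8, 8).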
903       Int32BinopMatcher mleft(m.left().node());
904       if (mleft.right().HasValue()) {
905         // Any shift value can match; int32 shifts use `value % 32`.
906         uint32_t lsb = mleft.right().Value() & 0x1f;
907 
908         // Ubfx cannot extract bits past the register size, however since
909         // shifting the original value would have introduced some zeros we can
910         // still use ubfx with a smaller mask and the remaining bits will be
911         // zeros.
912         if (lsb + mask_width > 32) mask_width = 32 - lsb;
913 
914         Emit(kArm64Ubfx32, g.DefineAsRegister(node),
915              g.UseRegister(mleft.left().node()),
916              g.UseImmediateOrTemp(mleft.right().node(), lsb),
917              g.TempImmediate(mask_width));
918         return;
919       }
920       // Other cases fall through to the normal And operation.
921     }
922   }
923   VisitLogical<Int32BinopMatcher>(
924       this, node, &m, kArm64And32, CanCover(node, m.left().node()),
925       CanCover(node, m.right().node()), kLogical32Imm);
926 }
927 
928 
929 void InstructionSelector::VisitWord64And(Node* node) {
930   Arm64OperandGenerator g(this);
931   Int64BinopMatcher m(node);
932   if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
933       m.right().HasValue()) {
934     uint64_t mask = m.right().Value();
935     uint64_t mask_width = base::bits::CountPopulation64(mask);
936     uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
937     if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
938       // The mask must be contiguous, and occupy the least-significant bits.
939       DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
940 
941       // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
942       // significant bits.
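      // For example, And(Shr(x, 16), 0xffff) becomes Ubfx(x, 16, 16).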
943       Int64BinopMatcher mleft(m.left().node());
944       if (mleft.right().HasValue()) {
945         // Any shift value can match; int64 shifts use `value % 64`.
946         uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
947 
948         // Ubfx cannot extract bits past the register size, however since
949         // shifting the original value would have introduced some zeros we can
950         // still use ubfx with a smaller mask and the remaining bits will be
951         // zeros.
952         if (lsb + mask_width > 64) mask_width = 64 - lsb;
953 
954         Emit(kArm64Ubfx, g.DefineAsRegister(node),
955              g.UseRegister(mleft.left().node()),
956              g.UseImmediateOrTemp(mleft.right().node(), lsb),
957              g.TempImmediate(static_cast<int32_t>(mask_width)));
958         return;
959       }
960       // Other cases fall through to the normal And operation.
961     }
962   }
963   VisitLogical<Int64BinopMatcher>(
964       this, node, &m, kArm64And, CanCover(node, m.left().node()),
965       CanCover(node, m.right().node()), kLogical64Imm);
966 }
967 
968 
969 void InstructionSelector::VisitWord32Or(Node* node) {
970   Int32BinopMatcher m(node);
971   VisitLogical<Int32BinopMatcher>(
972       this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
973       CanCover(node, m.right().node()), kLogical32Imm);
974 }
975 
976 
977 void InstructionSelector::VisitWord64Or(Node* node) {
978   Int64BinopMatcher m(node);
979   VisitLogical<Int64BinopMatcher>(
980       this, node, &m, kArm64Or, CanCover(node, m.left().node()),
981       CanCover(node, m.right().node()), kLogical64Imm);
982 }
983 
984 
985 void InstructionSelector::VisitWord32Xor(Node* node) {
986   Int32BinopMatcher m(node);
987   VisitLogical<Int32BinopMatcher>(
988       this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
989       CanCover(node, m.right().node()), kLogical32Imm);
990 }
991 
992 
993 void InstructionSelector::VisitWord64Xor(Node* node) {
994   Int64BinopMatcher m(node);
995   VisitLogical<Int64BinopMatcher>(
996       this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
997       CanCover(node, m.right().node()), kLogical64Imm);
998 }
999 
1000 
1001 void InstructionSelector::VisitWord32Shl(Node* node) {
1002   Int32BinopMatcher m(node);
1003   if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
1004       m.right().IsInRange(1, 31)) {
1005     Arm64OperandGenerator g(this);
1006     Int32BinopMatcher mleft(m.left().node());
1007     if (mleft.right().HasValue()) {
1008       uint32_t mask = mleft.right().Value();
1009       uint32_t mask_width = base::bits::CountPopulation32(mask);
1010       uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
1011       if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
1012         uint32_t shift = m.right().Value();
1013         DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
1014         DCHECK_NE(0u, shift);
1015 
1016         if ((shift + mask_width) >= 32) {
1017           // If the mask is contiguous and reaches or extends beyond the top
1018           // bit, only the shift is needed.
1019           Emit(kArm64Lsl32, g.DefineAsRegister(node),
1020                g.UseRegister(mleft.left().node()),
1021                g.UseImmediate(m.right().node()));
1022           return;
1023         } else {
1024           // Select Ubfiz for Shl(And(x, mask), imm) where the mask is
1025           // contiguous, and the shift immediate non-zero.
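          // For example, Shl(And(x, 0xff), 8) becomes Ubfiz32(x, 8, 8).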
1026           Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
1027                g.UseRegister(mleft.left().node()),
1028                g.UseImmediate(m.right().node()), g.TempImmediate(mask_width));
1029           return;
1030         }
1031       }
1032     }
1033   }
1034   VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
1035 }
1036 
1037 
1038 void InstructionSelector::VisitWord64Shl(Node* node) {
1039   Arm64OperandGenerator g(this);
1040   Int64BinopMatcher m(node);
1041   if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
1042       m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
1043     // There's no need to sign/zero-extend to 64-bit if we shift out the upper
1044     // 32 bits anyway.
1045     Emit(kArm64Lsl, g.DefineAsRegister(node),
1046          g.UseRegister(m.left().node()->InputAt(0)),
1047          g.UseImmediate(m.right().node()));
1048     return;
1049   }
1050   VisitRRO(this, kArm64Lsl, node, kShift64Imm);
1051 }
1052 
1053 
1054 namespace {
1055 
1056 bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
1057   Arm64OperandGenerator g(selector);
1058   Int32BinopMatcher m(node);
1059   if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
1060     // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
1061     // OP is >>> or >> and (K & 0x1f) != 0.
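    // For example, Sar(Shl(x, 24), 24) becomes Sbfx32(x, 0, 8), and
    // Shr(Shl(x, 24), 24) becomes Ubfx32(x, 0, 8).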
1062     Int32BinopMatcher mleft(m.left().node());
1063     if (mleft.right().HasValue() && m.right().HasValue() &&
1064         (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
1065       DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
1066       ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
1067 
1068       int right_val = m.right().Value() & 0x1f;
1069       DCHECK_NE(right_val, 0);
1070 
1071       selector->Emit(opcode, g.DefineAsRegister(node),
1072                      g.UseRegister(mleft.left().node()), g.TempImmediate(0),
1073                      g.TempImmediate(32 - right_val));
1074       return true;
1075     }
1076   }
1077   return false;
1078 }
1079 
1080 }  // namespace
1081 
1082 
1083 void InstructionSelector::VisitWord32Shr(Node* node) {
1084   Int32BinopMatcher m(node);
1085   if (m.left().IsWord32And() && m.right().HasValue()) {
1086     uint32_t lsb = m.right().Value() & 0x1f;
1087     Int32BinopMatcher mleft(m.left().node());
1088     if (mleft.right().HasValue()) {
1089       // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
1090       // shifted into the least-significant bits.
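      // For example, Shr(And(x, 0xff00), 8) becomes Ubfx32(x, 8, 8).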
1091       uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
1092       unsigned mask_width = base::bits::CountPopulation32(mask);
1093       unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
1094       if ((mask_msb + mask_width + lsb) == 32) {
1095         Arm64OperandGenerator g(this);
1096         DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
1097         Emit(kArm64Ubfx32, g.DefineAsRegister(node),
1098              g.UseRegister(mleft.left().node()),
1099              g.UseImmediateOrTemp(m.right().node(), lsb),
1100              g.TempImmediate(mask_width));
1101         return;
1102       }
1103     }
1104   } else if (TryEmitBitfieldExtract32(this, node)) {
1105     return;
1106   }
1107 
1108   if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
1109       CanCover(node, node->InputAt(0))) {
1110     // Combine this shift with the multiply and shift that would be generated
1111     // by Uint32MulHigh.
1112     Arm64OperandGenerator g(this);
1113     Node* left = m.left().node();
1114     int shift = m.right().Value() & 0x1f;
1115     InstructionOperand const smull_operand = g.TempRegister();
1116     Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
1117          g.UseRegister(left->InputAt(1)));
1118     Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
1119          g.TempImmediate(32 + shift));
1120     return;
1121   }
1122 
1123   VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
1124 }
1125 
1126 
1127 void InstructionSelector::VisitWord64Shr(Node* node) {
1128   Int64BinopMatcher m(node);
1129   if (m.left().IsWord64And() && m.right().HasValue()) {
1130     uint32_t lsb = m.right().Value() & 0x3f;
1131     Int64BinopMatcher mleft(m.left().node());
1132     if (mleft.right().HasValue()) {
1133       // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
1134       // shifted into the least-significant bits.
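      // For example, Shr(And(x, 0xffff0000), 16) becomes Ubfx(x, 16, 16).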
1135       uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
1136       unsigned mask_width = base::bits::CountPopulation64(mask);
1137       unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
1138       if ((mask_msb + mask_width + lsb) == 64) {
1139         Arm64OperandGenerator g(this);
1140         DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
1141         Emit(kArm64Ubfx, g.DefineAsRegister(node),
1142              g.UseRegister(mleft.left().node()),
1143              g.UseImmediateOrTemp(m.right().node(), lsb),
1144              g.TempImmediate(mask_width));
1145         return;
1146       }
1147     }
1148   }
1149   VisitRRO(this, kArm64Lsr, node, kShift64Imm);
1150 }
1151 
1152 
1153 void InstructionSelector::VisitWord32Sar(Node* node) {
1154   if (TryEmitBitfieldExtract32(this, node)) {
1155     return;
1156   }
1157 
1158   Int32BinopMatcher m(node);
1159   if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
1160       CanCover(node, node->InputAt(0))) {
1161     // Combine this shift with the multiply and shift that would be generated
1162     // by Int32MulHigh.
1163     Arm64OperandGenerator g(this);
1164     Node* left = m.left().node();
1165     int shift = m.right().Value() & 0x1f;
1166     InstructionOperand const smull_operand = g.TempRegister();
1167     Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
1168          g.UseRegister(left->InputAt(1)));
1169     Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
1170          g.TempImmediate(32 + shift));
1171     return;
1172   }
1173 
1174   if (m.left().IsInt32Add() && m.right().HasValue() &&
1175       CanCover(node, node->InputAt(0))) {
1176     Node* add_node = m.left().node();
1177     Int32BinopMatcher madd_node(add_node);
1178     if (madd_node.left().IsInt32MulHigh() &&
1179         CanCover(add_node, madd_node.left().node())) {
1180       // Combine the shift that would be generated by Int32MulHigh with the add
1181       // on the left of this Sar operation. We do it here, as the result of the
1182       // add potentially has 33 bits, so we have to ensure the result is
1183       // truncated by being the input to this 32-bit Sar operation.
1184       Arm64OperandGenerator g(this);
1185       Node* mul_node = madd_node.left().node();
1186 
1187       InstructionOperand const smull_operand = g.TempRegister();
1188       Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
1189            g.UseRegister(mul_node->InputAt(1)));
1190 
1191       InstructionOperand const add_operand = g.TempRegister();
1192       Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
1193            add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
1194            g.TempImmediate(32));
1195 
1196       Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
1197            g.UseImmediate(node->InputAt(1)));
1198       return;
1199     }
1200   }
1201 
1202   VisitRRO(this, kArm64Asr32, node, kShift32Imm);
1203 }
1204 
1205 
1206 void InstructionSelector::VisitWord64Sar(Node* node) {
1207   if (TryEmitExtendingLoad(this, node)) return;
1208   VisitRRO(this, kArm64Asr, node, kShift64Imm);
1209 }
1210 
1211 
1212 void InstructionSelector::VisitWord32Ror(Node* node) {
1213   VisitRRO(this, kArm64Ror32, node, kShift32Imm);
1214 }
1215 
1216 
1217 void InstructionSelector::VisitWord64Ror(Node* node) {
1218   VisitRRO(this, kArm64Ror, node, kShift64Imm);
1219 }
1220 
1221 
1222 void InstructionSelector::VisitWord64Clz(Node* node) {
1223   Arm64OperandGenerator g(this);
1224   Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
1225 }
1226 
1227 
1228 void InstructionSelector::VisitWord32Clz(Node* node) {
1229   Arm64OperandGenerator g(this);
1230   Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
1231 }
1232 
1233 
1234 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
1235 
1236 
1237 void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
1238 
1239 
1240 void InstructionSelector::VisitWord32ReverseBits(Node* node) {
1241   VisitRR(this, kArm64Rbit32, node);
1242 }
1243 
1244 
1245 void InstructionSelector::VisitWord64ReverseBits(Node* node) {
1246   VisitRR(this, kArm64Rbit, node);
1247 }
1248 
1249 void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
1250 
1251 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
1252 
1253 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
1254 
1255 
1256 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
1257 
1258 
1259 void InstructionSelector::VisitInt32Add(Node* node) {
1260   Arm64OperandGenerator g(this);
1261   Int32BinopMatcher m(node);
1262   // Select Madd(x, y, z) for Add(Mul(x, y), z).
1263   if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
1264     Int32BinopMatcher mleft(m.left().node());
1265     // Check multiply can't be later reduced to addition with shift.
1266     if (LeftShiftForReducedMultiply(&mleft) == 0) {
1267       Emit(kArm64Madd32, g.DefineAsRegister(node),
1268            g.UseRegister(mleft.left().node()),
1269            g.UseRegister(mleft.right().node()),
1270            g.UseRegister(m.right().node()));
1271       return;
1272     }
1273   }
1274   // Select Madd(x, y, z) for Add(z, Mul(x, y)).
1275   if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
1276     Int32BinopMatcher mright(m.right().node());
1277     // Check multiply can't be later reduced to addition with shift.
1278     if (LeftShiftForReducedMultiply(&mright) == 0) {
1279       Emit(kArm64Madd32, g.DefineAsRegister(node),
1280            g.UseRegister(mright.left().node()),
1281            g.UseRegister(mright.right().node()),
1282            g.UseRegister(m.left().node()));
1283       return;
1284     }
1285   }
1286   VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
1287 }
1288 
1289 
1290 void InstructionSelector::VisitInt64Add(Node* node) {
1291   Arm64OperandGenerator g(this);
1292   Int64BinopMatcher m(node);
1293   // Select Madd(x, y, z) for Add(Mul(x, y), z).
1294   if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
1295     Int64BinopMatcher mleft(m.left().node());
1296     // Check multiply can't be later reduced to addition with shift.
1297     if (LeftShiftForReducedMultiply(&mleft) == 0) {
1298       Emit(kArm64Madd, g.DefineAsRegister(node),
1299            g.UseRegister(mleft.left().node()),
1300            g.UseRegister(mleft.right().node()),
1301            g.UseRegister(m.right().node()));
1302       return;
1303     }
1304   }
1305   // Select Madd(x, y, z) for Add(z, Mul(x, y)).
1306   if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
1307     Int64BinopMatcher mright(m.right().node());
1308     // Check multiply can't be later reduced to addition with shift.
1309     if (LeftShiftForReducedMultiply(&mright) == 0) {
1310       Emit(kArm64Madd, g.DefineAsRegister(node),
1311            g.UseRegister(mright.left().node()),
1312            g.UseRegister(mright.right().node()),
1313            g.UseRegister(m.left().node()));
1314       return;
1315     }
1316   }
1317   VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
1318 }
1319 
1320 
1321 void InstructionSelector::VisitInt32Sub(Node* node) {
1322   Arm64OperandGenerator g(this);
1323   Int32BinopMatcher m(node);
1324 
1325   // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub32, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
}

namespace {

void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
                              FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand result = g.DefineAsRegister(node);
  InstructionOperand left = g.UseRegister(m.left().node());
  InstructionOperand right = g.UseRegister(m.right().node());
  selector->Emit(kArm64Smull, result, left, right);
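  // The multiplication overflowed iff the sign-extended low 32 bits of the
  // 64-bit smull result differ from the full result; the compare below, with
  // an SXTW operand, checks exactly that.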

  InstructionCode opcode = cont->Encode(kArm64Cmp) |
                           AddressingModeField::encode(kMode_Operand2_R_SXTW);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), result, result,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    InstructionOperand in[] = {result, result};
    selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->reason(),
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
  }
}

}  // namespace

void InstructionSelector::VisitInt32Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift.
  // x * (2^k + 1) -> x + (x << k)
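  // e.g. x * 5 becomes a single add with a shifted operand:
  //   add Rd, Rx, Rx, lsl #2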
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
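    // (mneg d, n, m computes d = -(n * m).)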
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul32, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift.
  // x * (2^k + 1) -> x + (x << k)
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul, node);
}

void InstructionSelector::VisitInt32MulHigh(Node* node) {
  Arm64OperandGenerator g(this);
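  // The upper 32 bits of the widening multiply are obtained by an smull
  // followed by an arithmetic shift right by 32.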
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kArm64Float32ToFloat64, node);
}


void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kArm64Int32ToFloat32, node);
}


void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kArm64Uint32ToFloat32, node);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kArm64Int32ToFloat64, node);
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kArm64Uint32ToFloat64, node);
}


void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kArm64Float32ToInt32, node);
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kArm64Float64ToInt32, node);
}


void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kArm64Float32ToUint32, node);
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kArm64Float64ToUint32, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kArm64Float64ToUint32, node);
}

void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

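  // The second (success) output is only defined if its projection is
  // actually used.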
  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    // Generate sign-extending load.
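    // e.g. ChangeInt32ToInt64(Load[Int8]) becomes a single ldrsb, and a
    // 32-bit load becomes ldrsw.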
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    MachineRepresentation rep = load_rep.representation();
    InstructionCode opcode = kArchNop;
    ImmediateMode immediate_mode = kNoImmediate;
    switch (rep) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
        immediate_mode = kLoadStoreImm8;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
        immediate_mode = kLoadStoreImm16;
        break;
      case MachineRepresentation::kWord32:
        opcode = kArm64Ldrsw;
        immediate_mode = kLoadStoreImm32;
        break;
      default:
        UNREACHABLE();
        return;
    }
    EmitLoad(this, value, opcode, immediate_mode, rep, node);
  } else {
    VisitRR(this, kArm64Sxtw, node);
  }
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32SubWithOverflow:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // 32-bit operations will write their result in a W register (implicitly
      // clearing the top 32 bits of the corresponding X register) so the
      // zero-extension is a no-op.
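      // e.g. a 32-bit add that defines w0 also zeroes the upper half of x0,
      // so no explicit mov/uxtw is required here.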
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    case IrOpcode::kLoad: {
      // As for the operations above, a 32-bit load will implicitly clear the
      // top 32 bits of the destination register.
      LoadRepresentation load_rep = LoadRepresentationOf(value->op());
      switch (load_rep.representation()) {
        case MachineRepresentation::kWord8:
        case MachineRepresentation::kWord16:
        case MachineRepresentation::kWord32:
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
          return;
        default:
          break;
      }
    }
    default:
      break;
  }
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kArm64Float64ToFloat32, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kArm64Float64ToInt32, node);
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // The top 32 bits in the 64-bit register will be undefined, and
  // must not be used by a dependent node.
  Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
}


void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kArm64Int64ToFloat32, node);
}


void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kArm64Int64ToFloat64, node);
}


void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kArm64Uint64ToFloat32, node);
}


void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kArm64Uint64ToFloat64, node);
}


void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kArm64Float64ExtractLowWord32, node);
}


void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kArm64U64MoveFloat64, node);
}


void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kArm64Float64MoveU64, node);
}


void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kArm64Float64MoveU64, node);
}


void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kArm64Float32Add, node);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRR(this, kArm64Float64Add, node);
}


void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kArm64Float32Sub, node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRR(this, kArm64Float64Sub, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kArm64Float32Mul, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kArm64Float32Div, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kArm64Float64Div, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
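  // AArch64 has no instruction for floating-point remainder, so this is
  // lowered to a call (note MarkAsCall) with the operands passed in d0/d1.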
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}

void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitRRR(this, kArm64Float32Max, node);
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRR(this, kArm64Float64Max, node);
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRR(this, kArm64Float32Min, node);
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRR(this, kArm64Float64Min, node);
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kArm64Float32Abs, node);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kArm64Float64Abs, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kArm64Float32Sqrt, node);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kArm64Float64Sqrt, node);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kArm64Float32RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kArm64Float64RoundDown, node);
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kArm64Float32RoundUp, node);
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kArm64Float64RoundUp, node);
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float32RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesAway, node);
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kArm64Float32RoundTiesEven, node);
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesEven, node);
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kArm64Float32Neg, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kArm64Float64Neg, node);
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Arm64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Arm64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
      ->MarkAsCall();
}

void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  Arm64OperandGenerator g(this);

  bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
  bool to_native_stack = descriptor->UseNativeStack();

  bool always_claim = to_native_stack != from_native_stack;

  int claim_count = static_cast<int>(arguments->size());
  int slot = claim_count - 1;
  // Bump the stack pointer(s).
  if (claim_count > 0 || always_claim) {
    // TODO(titzer): claim and poke probably take small immediates.
    // TODO(titzer): it would be better to bump the csp here only
    //                and emit paired stores with increment for non c frames.
    ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
    // Claim(0) isn't a nop if there is a mismatch between CSP and JSSP.
    Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
  }

  // Poke the arguments into the stack.
  ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
  while (slot >= 0) {
    Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
         g.TempImmediate(slot));
    slot--;
    // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
    //              same type.
    // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
    //      g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
    // slot -= 2;
  }
}


bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

namespace {

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative, ImmediateMode immediate_mode) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}

// This function checks whether we can convert:
// ((a <op> b) cmp 0), b.<cond>
// to:
// (a <ops> b), b.<cond'>
// where <ops> is the flag setting version of <op>.
// We only generate conditions <cond'> that are a combination of the N
// and Z flags. This avoids the need to make this function dependent on
// the flag-setting operation.
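// For example, Word32Equal(Int32Add(a, b), 0) can be lowered to
//   cmn wa, wb; b.eq
// since CMN sets the Z flag exactly when a + b == 0.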
bool CanUseFlagSettingBinop(FlagsCondition cond) {
  switch (cond) {
    case kEqual:
    case kNotEqual:
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
    case kUnsignedGreaterThan:      // x > 0 -> x != 0
      return true;
    default:
      return false;
  }
}

// Map <cond> to <cond'> so that the following transformation is possible:
// ((a <op> b) cmp 0), b.<cond>
// to:
// (a <ops> b), b.<cond'>
// where <ops> is the flag setting version of <op>.
FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
  DCHECK(CanUseFlagSettingBinop(cond));
  switch (cond) {
    case kEqual:
    case kNotEqual:
      return cond;
    case kSignedLessThan:
      return kNegative;
    case kSignedGreaterThanOrEqual:
      return kPositiveOrZero;
    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
      return kEqual;
    case kUnsignedGreaterThan:  // x > 0 -> x != 0
      return kNotEqual;
    default:
      UNREACHABLE();
      return cond;
  }
}

// This function checks if we can perform the transformation:
// ((a <op> b) cmp 0), b.<cond>
// to:
// (a <ops> b), b.<cond'>
// where <ops> is the flag setting version of <op>, and if so,
// updates {node}, {opcode} and {cont} accordingly.
void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
                                             Node** node, Node* binop,
                                             ArchOpcode* opcode,
                                             FlagsCondition cond,
                                             FlagsContinuation* cont,
                                             ImmediateMode* immediate_mode) {
  ArchOpcode binop_opcode;
  ArchOpcode no_output_opcode;
  ImmediateMode binop_immediate_mode;
  switch (binop->opcode()) {
    case IrOpcode::kInt32Add:
      binop_opcode = kArm64Add32;
      no_output_opcode = kArm64Cmn32;
      binop_immediate_mode = kArithmeticImm;
      break;
    case IrOpcode::kWord32And:
      binop_opcode = kArm64And32;
      no_output_opcode = kArm64Tst32;
      binop_immediate_mode = kLogical32Imm;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (selector->CanCover(*node, binop)) {
    // The comparison is the only user of the add or and, so we can generate
    // a cmn or tst instead.
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = no_output_opcode;
    *node = binop;
    *immediate_mode = binop_immediate_mode;
  } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
    // We can also handle the case where the add and the compare are in the
    // same basic block, and the compare is the only use of add in this basic
    // block (the add has users in other basic blocks).
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = binop_opcode;
    *node = binop;
    *immediate_mode = binop_immediate_mode;
  }
}

// Map {cond} to kEqual or kNotEqual, so that we can select
// either TBZ or TBNZ when generating code for:
// (x cmp 0), b.{cond}
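// A signed comparison against zero only needs the sign bit, so the branch
// can test bit 31 (or bit 63 for 64-bit values) with TBZ/TBNZ.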
FlagsCondition MapForTbz(FlagsCondition cond) {
  switch (cond) {
    case kSignedLessThan:  // generate TBNZ
      return kNotEqual;
    case kSignedGreaterThanOrEqual:  // generate TBZ
      return kEqual;
    default:
      UNREACHABLE();
      return cond;
  }
}

// Map {cond} to kEqual or kNotEqual, so that we can select
// either CBZ or CBNZ when generating code for:
// (x cmp 0), b.{cond}
FlagsCondition MapForCbz(FlagsCondition cond) {
  switch (cond) {
    case kEqual:     // generate CBZ
    case kNotEqual:  // generate CBNZ
      return cond;
    case kUnsignedLessThanOrEqual:  // generate CBZ
      return kEqual;
    case kUnsignedGreaterThan:  // generate CBNZ
      return kNotEqual;
    default:
      UNREACHABLE();
      return cond;
  }
}

void EmitBranchOrDeoptimize(InstructionSelector* selector,
                            InstructionCode opcode, InstructionOperand value,
                            FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  if (cont->IsBranch()) {
    selector->Emit(cont->Encode(opcode), g.NoOutput(), value,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else {
    DCHECK(cont->IsDeoptimize());
    selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
                             cont->reason(), cont->frame_state());
  }
}

// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
// against zero, depending on the condition.
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
                     FlagsCondition cond, FlagsContinuation* cont) {
  Int32BinopMatcher m_user(user);
  USE(m_user);
  DCHECK(m_user.right().Is(0) || m_user.left().Is(0));

  // Only handle branches and deoptimisations.
  if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;

  switch (cond) {
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual: {
      // We don't generate TBZ/TBNZ for deoptimisations, as they have a
      // shorter range than conditional branches and generating them for
      // deoptimisations results in more veneers.
      if (cont->IsDeoptimize()) return false;
      Arm64OperandGenerator g(selector);
      cont->Overwrite(MapForTbz(cond));
      Int32Matcher m(node);
      if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) {
        // SignedLessThan(Float64ExtractHighWord32(x), 0) and
        // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) essentially
        // check the sign bit of a 64-bit floating point value.
        InstructionOperand temp = g.TempRegister();
        selector->Emit(kArm64U64MoveFloat64, temp,
                       g.UseRegister(node->InputAt(0)));
        selector->Emit(cont->Encode(kArm64TestAndBranch), g.NoOutput(), temp,
                       g.TempImmediate(63), g.Label(cont->true_block()),
                       g.Label(cont->false_block()));
        return true;
      }
      selector->Emit(cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
                     g.UseRegister(node), g.TempImmediate(31),
                     g.Label(cont->true_block()), g.Label(cont->false_block()));
      return true;
    }
    case kEqual:
    case kNotEqual:
    case kUnsignedLessThanOrEqual:
    case kUnsignedGreaterThan: {
      Arm64OperandGenerator g(selector);
      cont->Overwrite(MapForCbz(cond));
      EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
                             g.UseRegister(node), cont);
      return true;
    }
    default:
      return false;
  }
}

void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  Int32BinopMatcher m(node);
  ArchOpcode opcode = kArm64Cmp32;
  FlagsCondition cond = cont->condition();
  if (m.right().Is(0)) {
    if (TryEmitCbzOrTbz(selector, m.left().node(), node, cond, cont)) return;
  } else if (m.left().Is(0)) {
    FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
    if (TryEmitCbzOrTbz(selector, m.right().node(), node, commuted_cond, cont))
      return;
  }
  ImmediateMode immediate_mode = kArithmeticImm;
  if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
    // Emit flag setting add/and instructions for comparisons against zero.
    if (CanUseFlagSettingBinop(cond)) {
      Node* binop = m.left().node();
      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
                                              cond, cont, &immediate_mode);
    }
  } else if (m.left().Is(0) &&
             (m.right().IsInt32Add() || m.right().IsWord32And())) {
    // Same as above, but we need to commute the condition before we
    // continue with the rest of the checks.
    FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
    if (CanUseFlagSettingBinop(commuted_cond)) {
      Node* binop = m.right().node();
      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
                                              commuted_cond, cont,
                                              &immediate_mode);
    }
  } else if (m.right().IsInt32Sub() && (cond == kEqual || cond == kNotEqual)) {
    // Select negated compare for comparisons with negated right input.
    // Only do this for kEqual and kNotEqual, which do not depend on the
    // C and V flags, as those flags will be different with CMN when the
    // right-hand side of the original subtraction is INT_MIN.
    Node* sub = m.right().node();
    Int32BinopMatcher msub(sub);
    if (msub.left().Is(0)) {
      bool can_cover = selector->CanCover(node, sub);
      node->ReplaceInput(1, msub.right().node());
      // Even if the comparison node covers the subtraction, after the input
      // replacement above, the node still won't cover the input to the
      // subtraction; the subtraction still uses it.
      // In order to get shifted operations to work, we must remove the rhs
      // input to the subtraction, as TryMatchAnyShift requires this node to
      // cover the input shift. We do this by setting it to the lhs input,
      // as we know it's zero, and the result of the subtraction isn't used by
      // any other node.
      if (can_cover) sub->ReplaceInput(1, msub.left().node());
      opcode = kArm64Cmn32;
    }
  }
  VisitBinop<Int32BinopMatcher>(selector, node, opcode, immediate_mode, cont);
}


void VisitWordTest(InstructionSelector* selector, Node* node,
                   InstructionCode opcode, FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
               cont);
}


void VisitWord32Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst32, cont);
}


void VisitWord64Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst, cont);
}

template <typename Matcher, ArchOpcode kOpcode>
bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
                          FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  if (cont->IsBranch() && m.right().HasValue() &&
      (base::bits::CountPopulation(m.right().Value()) == 1)) {
    // If the mask has only one bit set, we can use tbz/tbnz.
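    // e.g. branching on Word32And(x, 0x8) selects tbnz wx, #3, <true block>.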
    DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
    selector->Emit(
        cont->Encode(kOpcode), g.NoOutput(), g.UseRegister(m.left().node()),
        g.TempImmediate(base::bits::CountTrailingZeros(m.right().Value())),
        g.Label(cont->true_block()), g.Label(cont->false_block()));
    return true;
  }
  return false;
}

// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float32BinopMatcher m(node);
  if (m.right().Is(0.0f)) {
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0f)) {
    cont->Commute();
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}


// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().Is(0.0)) {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0)) {
    cont->Commute();
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}

void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  // Try to combine with comparisons against 0 by simply inverting the branch.
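  // e.g. Branch(Word32Equal(x, 0)) becomes a branch on x itself with the
  // condition negated, which can then fold into cbz/cbnz below.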
  while (value->opcode() == IrOpcode::kWord32Equal &&
         selector->CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          Node* const left = m.left().node();
          if (selector->CanCover(value, left) &&
              left->opcode() == IrOpcode::kWord64And) {
            // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
            // into a tbz/tbnz instruction.
            if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
                    selector, left, cont)) {
              return;
            }
            return VisitWordCompare(selector, left, kArm64Tst, cont, true,
                                    kLogical64Imm);
          }
          // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
          if (cont->IsBranch() || cont->IsDeoptimize()) {
            EmitBranchOrDeoptimize(selector,
                                   cont->Encode(kArm64CompareAndBranch),
                                   g.UseRegister(left), cont);
            return;
          }
        }
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    selector, node, kArm64Add32, kArithmeticImm, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    selector, node, kArm64Sub32, kArithmeticImm, cont);
              case IrOpcode::kInt32MulWithOverflow:
                // ARM64 doesn't set the overflow flag for multiplication, so we
                // need to test on kNotEqual. Here is the code sequence used:
                //   smull result, left, right
                //   cmp result.X(), Operand(result, SXTW)
                cont->OverwriteAndNegateIfEqual(kNotEqual);
                return EmitInt32MulWithOverflow(selector, node, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
                                                     kArithmeticImm, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Sub,
                                                     kArithmeticImm, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Add:
        return VisitWordCompare(selector, value, kArm64Cmn32, cont, true,
                                kArithmeticImm);
      case IrOpcode::kInt32Sub:
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kWord32And:
        if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
                selector, value, cont)) {
          return;
        }
        return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
                                kLogical32Imm);
      case IrOpcode::kWord64And:
        if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
                selector, value, cont)) {
          return;
        }
        return VisitWordCompare(selector, value, kArm64Tst, cont, true,
                                kLogical64Imm);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, compare against 0 and branch.
  if (cont->IsBranch()) {
    selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
                   g.UseRegister(value), g.Label(cont->true_block()),
                   g.Label(cont->false_block()));
  } else {
    DCHECK(cont->IsDeoptimize());
    selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
                             g.UseRegister(value), g.UseRegister(value),
                             cont->reason(), cont->frame_state());
  }
}

}  // namespace

void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Arm64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
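  // A jump table is preferred when its estimated cost (space plus three
  // times time, in instruction counts) does not exceed that of a chain of
  // conditional jumps, and rebasing on sw.min_value cannot overflow.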
2549   size_t table_space_cost = 4 + sw.value_range;
2550   size_t table_time_cost = 3;
2551   size_t lookup_space_cost = 3 + 2 * sw.case_count;
2552   size_t lookup_time_cost = sw.case_count;
2553   if (sw.case_count > 0 &&
2554       table_space_cost + 3 * table_time_cost <=
2555           lookup_space_cost + 3 * lookup_time_cost &&
2556       sw.min_value > std::numeric_limits<int32_t>::min()) {
2557     InstructionOperand index_operand = value_operand;
2558     if (sw.min_value) {
2559       index_operand = g.TempRegister();
2560       Emit(kArm64Sub32, index_operand, value_operand,
2561            g.TempImmediate(sw.min_value));
2562     }
2563     // Generate a table lookup.
2564     return EmitTableSwitch(sw, index_operand);
2565   }
2566 
2567   // Generate a sequence of conditional jumps.
2568   return EmitLookupSwitch(sw, value_operand);
2569 }
2570 
2571 
VisitWord32Equal(Node * const node)2572 void InstructionSelector::VisitWord32Equal(Node* const node) {
2573   Node* const user = node;
2574   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2575   Int32BinopMatcher m(user);
2576   if (m.right().Is(0)) {
2577     Node* const value = m.left().node();
2578     if (CanCover(user, value)) {
2579       switch (value->opcode()) {
2580         case IrOpcode::kInt32Add:
2581         case IrOpcode::kWord32And:
2582           return VisitWord32Compare(this, node, &cont);
2583         case IrOpcode::kInt32Sub:
2584           return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
2585                                   kArithmeticImm);
2586         case IrOpcode::kWord32Equal: {
2587           // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
2588           Int32BinopMatcher mequal(value);
2589           node->ReplaceInput(0, mequal.left().node());
2590           node->ReplaceInput(1, mequal.right().node());
2591           cont.Negate();
2592           // {node} still does not cover its new operands, because {mequal} is
2593           // still using them.
2594           // Since we won't generate any more code for {mequal}, set its
2595           // operands to zero to make sure {node} can cover them.
2596           // This improves pattern matching in VisitWord32Compare.
2597           mequal.node()->ReplaceInput(0, m.right().node());
2598           mequal.node()->ReplaceInput(1, m.right().node());
2599           return VisitWord32Compare(this, node, &cont);
2600         }
2601         default:
2602           break;
2603       }
2604       return VisitWord32Test(this, value, &cont);
2605     }
2606   }
2607   VisitWord32Compare(this, node, &cont);
2608 }
2609 
2610 
VisitInt32LessThan(Node * node)2611 void InstructionSelector::VisitInt32LessThan(Node* node) {
2612   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2613   VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
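      // As in the 32-bit case, fold the comparison against zero into the
      // node producing {value} where possible: a Word64And is emitted as a
      // tst, everything else falls back to a plain test of {value}.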
      switch (value->opcode()) {
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                  kLogical64Imm);
        default:
          break;
      }
      return VisitWord64Test(this, value, &cont);
    }
  }
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
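  // If the overflow projection is used, attach a flags continuation that
  // materializes the overflow flag into that projection; otherwise the flags
  // are dead and the addition is emitted on its own.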
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
}

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    // ARM64 doesn't set the overflow flag for multiplication, so we need to
    // test on kNotEqual. Here is the code sequence used:
    //   smull result, left, right
    //   cmp result.X(), Operand(result, SXTW)
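    // smull produces the full 64-bit product of the two 32-bit operands;
    // comparing that result with its own low 32 bits sign-extended back to
    // 64 bits (SXTW) sets the flags to "not equal" exactly when the product
    // does not fit in 32 bits, i.e. when the multiplication overflowed.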
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
    return EmitInt32MulWithOverflow(this, node, &cont);
  }
  FlagsContinuation cont;
  EmitInt32MulWithOverflow(this, node, &cont);
}

void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
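  // If {left} is a Float64InsertHighWord32 that is only used here, both
  // 32-bit halves of the result are known: build the 64-bit bit pattern with
  // a single Bfi that inserts the high word into the upper 32 bits of the
  // low word, then move it into the FP register. The original float behind
  // the high-word insert is fully overwritten, so it is not used at all.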
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
         g.UseRegister(right_of_left), g.TempImmediate(32),
         g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
    return;
  }
  Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
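  // Mirror image of the case above: if {left} is a Float64InsertLowWord32
  // that is only used here, insert the new high word into the upper 32 bits
  // of the low word with a single Bfi and move the result into the FP
  // register.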
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
         g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
    return;
  }
  Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kArm64Float64SilenceNaN, node);
}

void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
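  // Pick the atomic load opcode from the load representation: 8- and 16-bit
  // loads distinguish signed and unsigned (sign- vs. zero-extending)
  // variants, 32-bit loads do not, and wider representations are not
  // supported here.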
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}

void InstructionSelector::VisitAtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  AddressingMode addressing_mode = kMode_MRR;
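  // The store produces no register outputs. Base, index and value each get
  // their own register (UseUniqueRegister), and the address is formed with
  // the register + register (MRR) addressing mode.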
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 0, nullptr, input_count, inputs);
}

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kWord32ReverseBits |
         MachineOperatorBuilder::kWord64ReverseBits;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8