1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
#include <limits>

#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
10 
11 namespace v8 {
12 namespace internal {
13 namespace compiler {
14 
// Diagnostic printed when an instruction-selection path is intentionally
// unimplemented for MIPS64 (used by the stubbed-out visitors below).
#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

// General tracing hook for debugging instruction selection.
#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
19 
20 
21 // Adds Mips-specific methods for generating InstructionOperands.
22 class Mips64OperandGenerator final : public OperandGenerator {
23  public:
Mips64OperandGenerator(InstructionSelector * selector)24   explicit Mips64OperandGenerator(InstructionSelector* selector)
25       : OperandGenerator(selector) {}
26 
UseOperand(Node * node,InstructionCode opcode)27   InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
28     if (CanBeImmediate(node, opcode)) {
29       return UseImmediate(node);
30     }
31     return UseRegister(node);
32   }
33 
CanBeImmediate(Node * node,InstructionCode opcode)34   bool CanBeImmediate(Node* node, InstructionCode opcode) {
35     int64_t value;
36     if (node->opcode() == IrOpcode::kInt32Constant)
37       value = OpParameter<int32_t>(node);
38     else if (node->opcode() == IrOpcode::kInt64Constant)
39       value = OpParameter<int64_t>(node);
40     else
41       return false;
42     switch (ArchOpcodeField::decode(opcode)) {
43       case kMips64Shl:
44       case kMips64Sar:
45       case kMips64Shr:
46         return is_uint5(value);
47       case kMips64Dshl:
48       case kMips64Dsar:
49       case kMips64Dshr:
50         return is_uint6(value);
51       case kMips64Xor:
52         return is_uint16(value);
53       case kMips64Ldc1:
54       case kMips64Sdc1:
55         return is_int16(value + kIntSize);
56       default:
57         return is_int16(value);
58     }
59   }
60 
61  private:
ImmediateFitsAddrMode1Instruction(int32_t imm) const62   bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
63     TRACE_UNIMPL();
64     return false;
65   }
66 };
67 
68 
VisitRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)69 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
70                     Node* node) {
71   Mips64OperandGenerator g(selector);
72   selector->Emit(opcode, g.DefineAsRegister(node),
73                  g.UseRegister(node->InputAt(0)));
74 }
75 
76 
VisitRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)77 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
78                      Node* node) {
79   Mips64OperandGenerator g(selector);
80   selector->Emit(opcode, g.DefineAsRegister(node),
81                  g.UseRegister(node->InputAt(0)),
82                  g.UseRegister(node->InputAt(1)));
83 }
84 
85 
VisitRRO(InstructionSelector * selector,ArchOpcode opcode,Node * node)86 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
87                      Node* node) {
88   Mips64OperandGenerator g(selector);
89   selector->Emit(opcode, g.DefineAsRegister(node),
90                  g.UseRegister(node->InputAt(0)),
91                  g.UseOperand(node->InputAt(1), opcode));
92 }
93 
94 
// Shared lowering for binary operations.  Builds the operand vectors for a
// two-input instruction; when the flags continuation is a branch the two
// target labels are appended as inputs, and when it materializes a boolean
// an extra output register is added for the result.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  // NOTE(review): Int32BinopMatcher is used even for 64-bit opcodes; only
  // left()/right() node accessors are used here, so widths don't matter.
  Int32BinopMatcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // Left operand always in a register; right may fold into an immediate.
  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  // The continuation's condition is folded into the opcode's flags fields.
  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
                 inputs);
}
125 
126 
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode)127 static void VisitBinop(InstructionSelector* selector, Node* node,
128                        InstructionCode opcode) {
129   FlagsContinuation cont;
130   VisitBinop(selector, node, opcode, &cont);
131 }
132 
133 
// Selects a load.  The machine opcode is chosen from the loaded
// representation; if the index is a constant that fits the 16-bit offset
// field a single base+offset (MRI) load is emitted, otherwise the effective
// address is materialized into a temp with Dadd first.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Lwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Ldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
      break;
    case MachineRepresentation::kWord32:
      opcode = kMips64Lw;
      break;
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      // Tagged values are full pointers on MIPS64, so both use a 64-bit load.
      opcode = kMips64Ld;
      break;
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Index doesn't fit the immediate field: compute base + index up front.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
179 
180 
// Selects a store.  Stores that require a write barrier are lowered to
// kArchStoreWithWriteBarrier with the record-write mode packed into
// MiscField; plain stores pick a machine store opcode by representation and
// use either base+offset (MRI) mode or a materialized address, mirroring
// VisitLoad.
void InstructionSelector::VisitStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    // Only tagged (pointer) stores can need a write barrier.
    DCHECK_EQ(MachineRepresentation::kTagged, rep);
    InstructionOperand inputs[3];
    size_t input_count = 0;
    // Unique registers keep inputs disjoint from the temps the barrier uses;
    // a map write lets the value share, the other kinds keep it unique too.
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                ? g.UseRegister(value)
                                : g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMips64Swc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMips64Sdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMips64Sb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMips64Sh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Sw;
        break;
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kMips64Sd;
        break;
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
    } else {
      // Index doesn't fit the immediate field: compute base + index first.
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegister(value));
    }
  }
}
262 
263 
// Selects Word32And, recognizing two bitfield patterns:
//  - And(Shr(x, imm), low_contiguous_mask)  =>  Ext (bitfield extract)
//  - And(x, high_contiguous_mask)           =>  Ins of zeros into low bits
// Anything else falls back to a plain "and" (register or immediate).
void InstructionSelector::VisitWord32And(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1f;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    // ~mask must be a non-empty, non-full run of low bits for Ins to apply.
    uint32_t shift = base::bits::CountPopulation32(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
      Emit(kMips64Ins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And);
}
312 
313 
// 64-bit analogue of VisitWord32And: matches Dext for shifted low masks and
// Dins for zeroing a run of low bits, falling back to a plain "and".
void InstructionSelector::VisitWord64And(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation64(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);

        // Dext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Dext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        Emit(kMips64Dext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(static_cast<int32_t>(mask_width)));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation64(~mask);
    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
    if (shift != 0 && shift < 32 && msb + shift == 64) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask. Dins cannot insert bits
      // past word size, so shifts smaller than 32 are covered.
      Emit(kMips64Dins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And);
}
363 
364 
// Plain 32-bit "or"; the right operand may fold into a 16-bit immediate.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMips64Or);
}


// Plain 64-bit "or"; same opcode — MIPS "or" is width-agnostic on registers.
void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kMips64Or);
}
373 
374 
VisitWord32Xor(Node * node)375 void InstructionSelector::VisitWord32Xor(Node* node) {
376   Int32BinopMatcher m(node);
377   if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
378       m.right().Is(-1)) {
379     Int32BinopMatcher mleft(m.left().node());
380     if (!mleft.right().HasValue()) {
381       Mips64OperandGenerator g(this);
382       Emit(kMips64Nor, g.DefineAsRegister(node),
383            g.UseRegister(mleft.left().node()),
384            g.UseRegister(mleft.right().node()));
385       return;
386     }
387   }
388   if (m.right().Is(-1)) {
389     // Use Nor for bit negation and eliminate constant loading for xori.
390     Mips64OperandGenerator g(this);
391     Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
392          g.TempImmediate(0));
393     return;
394   }
395   VisitBinop(this, node, kMips64Xor);
396 }
397 
398 
VisitWord64Xor(Node * node)399 void InstructionSelector::VisitWord64Xor(Node* node) {
400   Int64BinopMatcher m(node);
401   if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
402       m.right().Is(-1)) {
403     Int64BinopMatcher mleft(m.left().node());
404     if (!mleft.right().HasValue()) {
405       Mips64OperandGenerator g(this);
406       Emit(kMips64Nor, g.DefineAsRegister(node),
407            g.UseRegister(mleft.left().node()),
408            g.UseRegister(mleft.right().node()));
409       return;
410     }
411   }
412   if (m.right().Is(-1)) {
413     // Use Nor for bit negation and eliminate constant loading for xori.
414     Mips64OperandGenerator g(this);
415     Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
416          g.TempImmediate(0));
417     return;
418   }
419   VisitBinop(this, node, kMips64Xor);
420 }
421 
422 
// Selects Word32Shl.  When the input is And(x, contiguous mask) and the
// shift pushes the whole mask at or past the top bit, the And is redundant
// and only the shift is emitted.
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Mips64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Shl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Shl, node);
}
452 
453 
// Selects Word32Shr, matching Shr(And(x, mask), imm) to Ext when the masked
// bits land exactly in the least-significant positions after the shift.
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        // NOTE(review): if mask == 0 and lsb == 0 this condition still holds,
        // the DCHECK below fails, and a zero-width Ext would be emitted;
        // presumably unreachable after graph reduction — confirm.
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Shr, node);
}
477 
478 
// 32-bit arithmetic right shift; amount may be a 5-bit immediate.
void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitRRO(this, kMips64Sar, node);
}
482 
483 
// Selects Word64Shl.  Two patterns are recognized: a 32->64 extension whose
// low word is shifted entirely out (the extension is skipped), and
// Shl(And(x, contiguous mask), imm) where the shift makes the And redundant.
void InstructionSelector::VisitWord64Shl(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kMips64Dshl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 63)) {
    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
    // contiguous, and the shift immediate non-zero.
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint64_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation64(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
        uint64_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 64) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Dshl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Dshl, node);
}
523 
524 
// Selects Word64Shr, matching Shr(And(x, mask), imm) to Dext when the masked
// bits land exactly in the least-significant positions after the shift.
void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x3f;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation64(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        // NOTE(review): as in VisitWord32Shr, mask == 0 with lsb == 0 passes
        // this check but fails the DCHECK; presumably unreachable — confirm.
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kMips64Dext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Dshr, node);
}
548 
549 
// 64-bit arithmetic right shift; amount may be a 6-bit immediate.
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kMips64Dsar, node);
}
553 
554 
// 32-bit rotate right; amount may be an immediate.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMips64Ror, node);
}


// Count leading zeros, 32-bit.
void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMips64Clz, node);
}


// Ctz is not selected on MIPS64; this visitor must never be reached.
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }


// Ctz is not selected on MIPS64; this visitor must never be reached.
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }


// Popcnt is not selected on MIPS64; this visitor must never be reached.
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }


// Popcnt is not selected on MIPS64; this visitor must never be reached.
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }


// 64-bit rotate right; amount may be an immediate.
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kMips64Dror, node);
}


// Count leading zeros, 64-bit.
void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kMips64Dclz, node);
}
585 
586 
VisitInt32Add(Node * node)587 void InstructionSelector::VisitInt32Add(Node* node) {
588   Mips64OperandGenerator g(this);
589   // TODO(plind): Consider multiply & add optimization from arm port.
590   VisitBinop(this, node, kMips64Add);
591 }
592 
593 
VisitInt64Add(Node * node)594 void InstructionSelector::VisitInt64Add(Node* node) {
595   Mips64OperandGenerator g(this);
596   // TODO(plind): Consider multiply & add optimization from arm port.
597   VisitBinop(this, node, kMips64Dadd);
598 }
599 
600 
// 32-bit subtract; right operand may fold into an immediate.
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMips64Sub);
}


// 64-bit subtract; right operand may fold into an immediate.
void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kMips64Dsub);
}
609 
610 
VisitInt32Mul(Node * node)611 void InstructionSelector::VisitInt32Mul(Node* node) {
612   Mips64OperandGenerator g(this);
613   Int32BinopMatcher m(node);
614   if (m.right().HasValue() && m.right().Value() > 0) {
615     int32_t value = m.right().Value();
616     if (base::bits::IsPowerOfTwo32(value)) {
617       Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
618            g.DefineAsRegister(node), g.UseRegister(m.left().node()),
619            g.TempImmediate(WhichPowerOf2(value)));
620       return;
621     }
622     if (base::bits::IsPowerOfTwo32(value - 1)) {
623       InstructionOperand temp = g.TempRegister();
624       Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
625            g.UseRegister(m.left().node()),
626            g.TempImmediate(WhichPowerOf2(value - 1)));
627       Emit(kMips64Add | AddressingModeField::encode(kMode_None),
628            g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
629       return;
630     }
631     if (base::bits::IsPowerOfTwo32(value + 1)) {
632       InstructionOperand temp = g.TempRegister();
633       Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
634            g.UseRegister(m.left().node()),
635            g.TempImmediate(WhichPowerOf2(value + 1)));
636       Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
637            g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
638       return;
639     }
640   }
641   Node* left = node->InputAt(0);
642   Node* right = node->InputAt(1);
643   if (CanCover(node, left) && CanCover(node, right)) {
644     if (left->opcode() == IrOpcode::kWord64Sar &&
645         right->opcode() == IrOpcode::kWord64Sar) {
646       Int64BinopMatcher leftInput(left), rightInput(right);
647       if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
648         // Combine untagging shifts with Dmul high.
649         Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
650              g.UseRegister(leftInput.left().node()),
651              g.UseRegister(rightInput.left().node()));
652         return;
653       }
654     }
655   }
656   VisitRRR(this, kMips64Mul, node);
657 }
658 
659 
// High 32 bits of a signed 32x32->64 multiply.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHigh, node);
}


// High 32 bits of an unsigned 32x32->64 multiply.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHighU, node);
}
668 
669 
VisitInt64Mul(Node * node)670 void InstructionSelector::VisitInt64Mul(Node* node) {
671   Mips64OperandGenerator g(this);
672   Int64BinopMatcher m(node);
673   // TODO(dusmil): Add optimization for shifts larger than 32.
674   if (m.right().HasValue() && m.right().Value() > 0) {
675     int32_t value = static_cast<int32_t>(m.right().Value());
676     if (base::bits::IsPowerOfTwo32(value)) {
677       Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
678            g.DefineAsRegister(node), g.UseRegister(m.left().node()),
679            g.TempImmediate(WhichPowerOf2(value)));
680       return;
681     }
682     if (base::bits::IsPowerOfTwo32(value - 1)) {
683       InstructionOperand temp = g.TempRegister();
684       Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
685            g.UseRegister(m.left().node()),
686            g.TempImmediate(WhichPowerOf2(value - 1)));
687       Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
688            g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
689       return;
690     }
691     if (base::bits::IsPowerOfTwo32(value + 1)) {
692       InstructionOperand temp = g.TempRegister();
693       Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
694            g.UseRegister(m.left().node()),
695            g.TempImmediate(WhichPowerOf2(value + 1)));
696       Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
697            g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
698       return;
699     }
700   }
701   Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
702        g.UseRegister(m.right().node()));
703 }
704 
705 
// Selects a 32-bit signed division.  When both operands are 32-bit-untagging
// Sar(x, 32) nodes covered by this use, the shifts are elided and a 64-bit
// Ddiv is used directly on the unshifted values.
void InstructionSelector::VisitInt32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Ddiv.
        Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
727 
728 
VisitUint32Div(Node * node)729 void InstructionSelector::VisitUint32Div(Node* node) {
730   Mips64OperandGenerator g(this);
731   Int32BinopMatcher m(node);
732   Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
733        g.UseRegister(m.right().node()));
734 }
735 
736 
// Selects a 32-bit signed modulus.  As in VisitInt32Div, two covered
// untagging Sar(x, 32) operands let the shifts be elided in favor of Dmod.
void InstructionSelector::VisitInt32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Dmod.
        Emit(kMips64Dmod, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
758 
759 
VisitUint32Mod(Node * node)760 void InstructionSelector::VisitUint32Mod(Node* node) {
761   Mips64OperandGenerator g(this);
762   Int32BinopMatcher m(node);
763   Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
764        g.UseRegister(m.right().node()));
765 }
766 
767 
VisitInt64Div(Node * node)768 void InstructionSelector::VisitInt64Div(Node* node) {
769   Mips64OperandGenerator g(this);
770   Int64BinopMatcher m(node);
771   Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
772        g.UseRegister(m.right().node()));
773 }
774 
775 
VisitUint64Div(Node * node)776 void InstructionSelector::VisitUint64Div(Node* node) {
777   Mips64OperandGenerator g(this);
778   Int64BinopMatcher m(node);
779   Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
780        g.UseRegister(m.right().node()));
781 }
782 
783 
VisitInt64Mod(Node * node)784 void InstructionSelector::VisitInt64Mod(Node* node) {
785   Mips64OperandGenerator g(this);
786   Int64BinopMatcher m(node);
787   Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
788        g.UseRegister(m.right().node()));
789 }
790 
791 
VisitUint64Mod(Node * node)792 void InstructionSelector::VisitUint64Mod(Node* node) {
793   Mips64OperandGenerator g(this);
794   Int64BinopMatcher m(node);
795   Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
796        g.UseRegister(m.right().node()));
797 }
798 
799 
// float32 -> float64 widening: a single cvt.d.s via the 2-operand helper.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDS, node);
}
803 
804 
// int32 -> float64 conversion: cvt.d.w.
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDW, node);
}
808 
809 
// uint32 -> float64 conversion (unsigned variant of cvt.d.w).
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUw, node);
}
813 
814 
// float64 -> int32 conversion. Folds an explicit rounding operation on the
// input into the combined round-and-convert instruction, and shortens the
// float32 -> float64 -> int32 chain to a direct float32 conversion.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  // Folding is only legal when |value| has no other uses (CanCover).
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMips64FloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMips64CeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMips64RoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMips64TruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    // If the input is a widened float32, convert straight from the float32.
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMips64FloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMips64CeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMips64RoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // No rounding op to fold: truncate the float32 input directly
            // (value->InputAt(0) == next here).
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMips64TruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  // Generic case: truncate the double input.
  VisitRR(this, kMips64TruncWD, node);
}
877 
878 
// float64 -> uint32 conversion: unsigned truncation.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}
882 
883 
VisitTryTruncateFloat32ToInt64(Node * node)884 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
885   Mips64OperandGenerator g(this);
886   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
887   InstructionOperand outputs[2];
888   size_t output_count = 0;
889   outputs[output_count++] = g.DefineAsRegister(node);
890 
891   Node* success_output = NodeProperties::FindProjection(node, 1);
892   if (success_output) {
893     outputs[output_count++] = g.DefineAsRegister(success_output);
894   }
895 
896   this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
897 }
898 
899 
VisitTryTruncateFloat64ToInt64(Node * node)900 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
901   Mips64OperandGenerator g(this);
902   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
903   InstructionOperand outputs[2];
904   size_t output_count = 0;
905   outputs[output_count++] = g.DefineAsRegister(node);
906 
907   Node* success_output = NodeProperties::FindProjection(node, 1);
908   if (success_output) {
909     outputs[output_count++] = g.DefineAsRegister(success_output);
910   }
911 
912   Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
913 }
914 
915 
VisitTryTruncateFloat32ToUint64(Node * node)916 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
917   Mips64OperandGenerator g(this);
918   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
919   InstructionOperand outputs[2];
920   size_t output_count = 0;
921   outputs[output_count++] = g.DefineAsRegister(node);
922 
923   Node* success_output = NodeProperties::FindProjection(node, 1);
924   if (success_output) {
925     outputs[output_count++] = g.DefineAsRegister(success_output);
926   }
927 
928   Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
929 }
930 
931 
VisitTryTruncateFloat64ToUint64(Node * node)932 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
933   Mips64OperandGenerator g(this);
934 
935   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
936   InstructionOperand outputs[2];
937   size_t output_count = 0;
938   outputs[output_count++] = g.DefineAsRegister(node);
939 
940   Node* success_output = NodeProperties::FindProjection(node, 1);
941   if (success_output) {
942     outputs[output_count++] = g.DefineAsRegister(success_output);
943   }
944 
945   Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
946 }
947 
948 
// int32 -> int64 sign extension. On MIPS64 the 32-bit shift (sll)
// sign-extends its result into the full 64-bit register, so a shift by 0
// performs the extension in one instruction.
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0));
}
954 
955 
// uint32 -> uint64 zero extension: dext of bits [0, 32) clears the high word.
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}
961 
962 
VisitTruncateInt64ToInt32(Node * node)963 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
964   Mips64OperandGenerator g(this);
965   Node* value = node->InputAt(0);
966   if (CanCover(node, value)) {
967     switch (value->opcode()) {
968       case IrOpcode::kWord64Sar: {
969         Int64BinopMatcher m(value);
970         if (m.right().IsInRange(32, 63)) {
971           // After smi untagging no need for truncate. Combine sequence.
972           Emit(kMips64Dsar, g.DefineSameAsFirst(node),
973                g.UseRegister(m.left().node()),
974                g.UseImmediate(m.right().node()));
975           return;
976         }
977         break;
978       }
979       default:
980         break;
981     }
982   }
983   Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
984        g.TempImmediate(0), g.TempImmediate(32));
985 }
986 
987 
VisitTruncateFloat64ToFloat32(Node * node)988 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
989   Mips64OperandGenerator g(this);
990   Node* value = node->InputAt(0);
991   // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
992   // instruction.
993   if (CanCover(node, value) &&
994       value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
995     Emit(kMips64CvtSW, g.DefineAsRegister(node),
996          g.UseRegister(value->InputAt(0)));
997     return;
998   }
999   VisitRR(this, kMips64CvtSD, node);
1000 }
1001 
1002 
// float64 -> int32 with explicit truncation mode. kJavaScript uses the
// architecture-neutral kArchTruncateDoubleToI (presumably full JS ToInt32
// semantics including out-of-range inputs — confirm in the code generator);
// kRoundToZero maps to trunc.w.d.
void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
  switch (TruncationModeOf(node->op())) {
    case TruncationMode::kJavaScript:
      return VisitRR(this, kArchTruncateDoubleToI, node);
    case TruncationMode::kRoundToZero:
      return VisitRR(this, kMips64TruncWD, node);
  }
  UNREACHABLE();
}
1012 
1013 
// int64 -> float32 conversion: cvt.s.l.
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSL, node);
}
1017 
1018 
// int64 -> float64 conversion: cvt.d.l.
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}
1022 
1023 
// uint64 -> float32 conversion (unsigned variant).
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUl, node);
}
1027 
1028 
// uint64 -> float64 conversion (unsigned variant).
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUl, node);
}
1032 
1033 
// Bitcast float32 -> int32, implemented with the float64 low-word extractor;
// presumably valid because a float32 occupies the low word of the FP
// register — confirm against the code generator.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
1037 
1038 
// Bitcast float64 -> int64 (register-to-register move, dmfc1-style).
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64BitcastDL, node);
}
1042 
1043 
VisitBitcastInt32ToFloat32(Node * node)1044 void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
1045   Mips64OperandGenerator g(this);
1046   Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
1047        ImmediateOperand(ImmediateOperand::INLINE, 0),
1048        g.UseRegister(node->InputAt(0)));
1049 }
1050 
1051 
// Bitcast int64 -> float64 (register-to-register move, dmtc1-style).
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64BitcastLD, node);
}
1055 
1056 
// add.s via the generic three-register helper.
void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kMips64AddS, node);
}
1060 
1061 
// add.d via the generic three-register helper.
void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRR(this, kMips64AddD, node);
}
1065 
1066 
// sub.s via the generic three-register helper.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kMips64SubS, node);
}
1070 
1071 
// float64 subtraction. Recognizes the pattern
//   -0.0 - Float64RoundDown(-0.0 - x)
// and folds it to Float64RoundUp(x) (ceil(x) == -floor(-x)); both
// intermediate nodes must be used only here (CanCover) for the fold.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  Mips64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
      CanCover(m.node(), m.right().node())) {
    if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
        CanCover(m.right().node(), m.right().InputAt(0))) {
      Float64BinopMatcher mright0(m.right().InputAt(0));
      if (mright0.left().IsMinusZero()) {
        Emit(kMips64Float64RoundUp, g.DefineAsRegister(node),
             g.UseRegister(mright0.right().node()));
        return;
      }
    }
  }
  VisitRRR(this, kMips64SubD, node);
}
1089 
1090 
// mul.s via the generic three-register helper.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMips64MulS, node);
}
1094 
1095 
// mul.d via the generic three-register helper.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMips64MulD, node);
}
1099 
1100 
// div.s via the generic three-register helper.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMips64DivS, node);
}
1104 
1105 
// div.d via the generic three-register helper.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMips64DivD, node);
}
1109 
1110 
// Double modulus has no MIPS instruction: it is emitted as a call
// (MarkAsCall) with operands pinned to FP argument registers f12/f14 and the
// result in f0 — presumably the C library fmod runtime; confirm against the
// code generator's handling of kMips64ModD.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ModD, g.DefineAsFixed(node, f0),
       g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}
1117 
1118 
VisitFloat32Max(Node * node)1119 void InstructionSelector::VisitFloat32Max(Node* node) {
1120   Mips64OperandGenerator g(this);
1121   if (kArchVariant == kMips64r6) {
1122     Emit(kMips64Float32Max, g.DefineAsRegister(node),
1123          g.UseUniqueRegister(node->InputAt(0)),
1124          g.UseUniqueRegister(node->InputAt(1)));
1125 
1126   } else {
1127     // Reverse operands, and use same reg. for result and right operand.
1128     Emit(kMips64Float32Max, g.DefineSameAsFirst(node),
1129          g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
1130   }
1131 }
1132 
1133 
VisitFloat64Max(Node * node)1134 void InstructionSelector::VisitFloat64Max(Node* node) {
1135   Mips64OperandGenerator g(this);
1136   if (kArchVariant == kMips64r6) {
1137     Emit(kMips64Float64Max, g.DefineAsRegister(node),
1138          g.UseUniqueRegister(node->InputAt(0)),
1139          g.UseUniqueRegister(node->InputAt(1)));
1140 
1141   } else {
1142     // Reverse operands, and use same reg. for result and right operand.
1143     Emit(kMips64Float64Max, g.DefineSameAsFirst(node),
1144          g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
1145   }
1146 }
1147 
1148 
VisitFloat32Min(Node * node)1149 void InstructionSelector::VisitFloat32Min(Node* node) {
1150   Mips64OperandGenerator g(this);
1151   if (kArchVariant == kMips64r6) {
1152     Emit(kMips64Float32Min, g.DefineAsRegister(node),
1153          g.UseUniqueRegister(node->InputAt(0)),
1154          g.UseUniqueRegister(node->InputAt(1)));
1155 
1156   } else {
1157     // Reverse operands, and use same reg. for result and right operand.
1158     Emit(kMips64Float32Min, g.DefineSameAsFirst(node),
1159          g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
1160   }
1161 }
1162 
1163 
VisitFloat64Min(Node * node)1164 void InstructionSelector::VisitFloat64Min(Node* node) {
1165   Mips64OperandGenerator g(this);
1166   if (kArchVariant == kMips64r6) {
1167     Emit(kMips64Float64Min, g.DefineAsRegister(node),
1168          g.UseUniqueRegister(node->InputAt(0)),
1169          g.UseUniqueRegister(node->InputAt(1)));
1170 
1171   } else {
1172     // Reverse operands, and use same reg. for result and right operand.
1173     Emit(kMips64Float64Min, g.DefineSameAsFirst(node),
1174          g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
1175   }
1176 }
1177 
1178 
// abs.s via the generic 2-operand helper.
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMips64AbsS, node);
}
1182 
1183 
// abs.d via the generic 2-operand helper.
void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMips64AbsD, node);
}
1187 
1188 
// sqrt.s via the generic 2-operand helper.
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtS, node);
}
1192 
1193 
// sqrt.d via the generic 2-operand helper.
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtD, node);
}
1197 
1198 
// floor on float32.
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMips64Float32RoundDown, node);
}
1202 
1203 
// floor on float64.
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMips64Float64RoundDown, node);
}
1207 
1208 
// ceil on float32.
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMips64Float32RoundUp, node);
}
1212 
1213 
// ceil on float64.
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMips64Float64RoundUp, node);
}
1217 
1218 
// round-toward-zero on float32.
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float32RoundTruncate, node);
}
1222 
1223 
// round-toward-zero on float64.
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float64RoundTruncate, node);
}
1227 
1228 
// Round-ties-away is not selected on MIPS64; presumably the machine-operator
// builder never advertises this op for the platform — confirm there.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}
1232 
1233 
// round-to-nearest-even on float32.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float32RoundTiesEven, node);
}
1237 
1238 
// round-to-nearest-even on float64.
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float64RoundTiesEven, node);
}
1242 
1243 
// Lay out outgoing call arguments on the stack before the call instruction.
// C calls reserve the argument area via kArchPrepareCallCFunction and then
// poke each argument; JS-style calls claim the whole area once and store
// each non-hole argument into its slot.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  Mips64OperandGenerator g(this);

  // Prepare for C function call.
  if (descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction |
             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    // Stack slots begin after kCArgSlotCount reserved slots — presumably the
    // MIPS C calling convention's register-argument save area; confirm.
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
           g.TempImmediate(slot << kPointerSizeLog2));
      ++slot;
    }
  } else {
    int push_count = static_cast<int>(descriptor->StackParameterCount());
    if (push_count > 0) {
      // Claim all stack-argument space in one adjustment.
      Emit(kMips64StackClaim, g.NoOutput(),
           g.TempImmediate(push_count << kPointerSizeLog2));
    }
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node()) {  // Skip holes.
        Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
             g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
      }
    }
  }
}
1277 
1278 
// Tail-call targets are never encoded as immediates on MIPS64; they are
// materialized in a register.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1280 
1281 
VisitCheckedLoad(Node * node)1282 void InstructionSelector::VisitCheckedLoad(Node* node) {
1283   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
1284   Mips64OperandGenerator g(this);
1285   Node* const buffer = node->InputAt(0);
1286   Node* const offset = node->InputAt(1);
1287   Node* const length = node->InputAt(2);
1288   ArchOpcode opcode = kArchNop;
1289   switch (load_rep.representation()) {
1290     case MachineRepresentation::kWord8:
1291       opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
1292       break;
1293     case MachineRepresentation::kWord16:
1294       opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
1295       break;
1296     case MachineRepresentation::kWord32:
1297       opcode = kCheckedLoadWord32;
1298       break;
1299     case MachineRepresentation::kWord64:
1300       opcode = kCheckedLoadWord64;
1301       break;
1302     case MachineRepresentation::kFloat32:
1303       opcode = kCheckedLoadFloat32;
1304       break;
1305     case MachineRepresentation::kFloat64:
1306       opcode = kCheckedLoadFloat64;
1307       break;
1308     case MachineRepresentation::kBit:
1309     case MachineRepresentation::kTagged:
1310     case MachineRepresentation::kNone:
1311       UNREACHABLE();
1312       return;
1313   }
1314   InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
1315                                           ? g.UseImmediate(offset)
1316                                           : g.UseRegister(offset);
1317 
1318   InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
1319                                           ? g.CanBeImmediate(length, opcode)
1320                                                 ? g.UseImmediate(length)
1321                                                 : g.UseRegister(length)
1322                                           : g.UseRegister(length);
1323 
1324   Emit(opcode | AddressingModeField::encode(kMode_MRI),
1325        g.DefineAsRegister(node), offset_operand, length_operand,
1326        g.UseRegister(buffer));
1327 }
1328 
1329 
VisitCheckedStore(Node * node)1330 void InstructionSelector::VisitCheckedStore(Node* node) {
1331   MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
1332   Mips64OperandGenerator g(this);
1333   Node* const buffer = node->InputAt(0);
1334   Node* const offset = node->InputAt(1);
1335   Node* const length = node->InputAt(2);
1336   Node* const value = node->InputAt(3);
1337   ArchOpcode opcode = kArchNop;
1338   switch (rep) {
1339     case MachineRepresentation::kWord8:
1340       opcode = kCheckedStoreWord8;
1341       break;
1342     case MachineRepresentation::kWord16:
1343       opcode = kCheckedStoreWord16;
1344       break;
1345     case MachineRepresentation::kWord32:
1346       opcode = kCheckedStoreWord32;
1347       break;
1348     case MachineRepresentation::kWord64:
1349       opcode = kCheckedStoreWord64;
1350       break;
1351     case MachineRepresentation::kFloat32:
1352       opcode = kCheckedStoreFloat32;
1353       break;
1354     case MachineRepresentation::kFloat64:
1355       opcode = kCheckedStoreFloat64;
1356       break;
1357     case MachineRepresentation::kBit:
1358     case MachineRepresentation::kTagged:
1359     case MachineRepresentation::kNone:
1360       UNREACHABLE();
1361       return;
1362   }
1363   InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
1364                                           ? g.UseImmediate(offset)
1365                                           : g.UseRegister(offset);
1366 
1367   InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
1368                                           ? g.CanBeImmediate(length, opcode)
1369                                                 ? g.UseImmediate(length)
1370                                                 : g.UseRegister(length)
1371                                           : g.UseRegister(length);
1372 
1373   Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1374        offset_operand, length_operand, g.UseRegister(value),
1375        g.UseRegister(buffer));
1376 }
1377 
1378 
namespace {

// Shared routine for multiple compare operations. Emits either a
// compare-and-branch (the continuation carries two block labels) or a
// compare-and-set (the continuation carries a boolean result node).
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple float32 compare operations.
// A zero operand is passed as an immediate so the backend can use the
// canonical FP zero register instead of materializing the constant.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Float32BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
}


// Shared routine for multiple float64 compare operations.
// Zero operands are passed as immediates, as in VisitFloat32Compare.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
}


// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  Mips64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    switch (cont->condition()) {
      case kEqual:
      case kNotEqual:
        // Equality uses the immediate form only for compare-and-set;
        // equality branches keep both operands in registers.
        if (cont->IsSet()) {
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
        } else {
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
        }
        break;
      case kSignedLessThan:
      case kSignedGreaterThanOrEqual:
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
        VisitCompare(selector, opcode, g.UseRegister(left),
                     g.UseImmediate(right), cont);
        break;
      default:
        // Other conditions take both operands in registers.
        VisitCompare(selector, opcode, g.UseRegister(left),
                     g.UseRegister(right), cont);
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    // Mirror of the case above with the operands swapped; non-commutative
    // conditions must be commuted to compensate.
    if (!commutative) cont->Commute();
    switch (cont->condition()) {
      case kEqual:
      case kNotEqual:
        if (cont->IsSet()) {
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
        } else {
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
        }
        break;
      case kSignedLessThan:
      case kSignedGreaterThanOrEqual:
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
        VisitCompare(selector, opcode, g.UseRegister(right),
                     g.UseImmediate(left), cont);
        break;
      default:
        VisitCompare(selector, opcode, g.UseRegister(right),
                     g.UseRegister(left), cont);
    }
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


// NOTE(review): both 32- and 64-bit comparisons use kMips64Cmp — presumably
// safe because 32-bit values are kept sign-extended in registers; confirm.
void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}


void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}

}  // namespace
1502 
1503 
EmitWordCompareZero(InstructionSelector * selector,Node * value,FlagsContinuation * cont)1504 void EmitWordCompareZero(InstructionSelector* selector, Node* value,
1505                          FlagsContinuation* cont) {
1506   Mips64OperandGenerator g(selector);
1507   InstructionCode opcode = cont->Encode(kMips64Cmp);
1508   InstructionOperand const value_operand = g.UseRegister(value);
1509   if (cont->IsBranch()) {
1510     selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
1511                    g.Label(cont->true_block()), g.Label(cont->false_block()));
1512   } else {
1513     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
1514                    g.TempImmediate(0));
1515   }
1516 }
1517 
1518 
1519 // Shared routine for word comparisons against zero.
VisitWordCompareZero(InstructionSelector * selector,Node * user,Node * value,FlagsContinuation * cont)1520 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
1521                           Node* value, FlagsContinuation* cont) {
1522   while (selector->CanCover(user, value)) {
1523     switch (value->opcode()) {
1524       case IrOpcode::kWord32Equal: {
1525         // Combine with comparisons against 0 by simply inverting the
1526         // continuation.
1527         Int32BinopMatcher m(value);
1528         if (m.right().Is(0)) {
1529           user = value;
1530           value = m.left().node();
1531           cont->Negate();
1532           continue;
1533         }
1534         cont->OverwriteAndNegateIfEqual(kEqual);
1535         return VisitWord32Compare(selector, value, cont);
1536       }
1537       case IrOpcode::kInt32LessThan:
1538         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1539         return VisitWord32Compare(selector, value, cont);
1540       case IrOpcode::kInt32LessThanOrEqual:
1541         cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1542         return VisitWord32Compare(selector, value, cont);
1543       case IrOpcode::kUint32LessThan:
1544         cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1545         return VisitWord32Compare(selector, value, cont);
1546       case IrOpcode::kUint32LessThanOrEqual:
1547         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1548         return VisitWord32Compare(selector, value, cont);
1549       case IrOpcode::kWord64Equal: {
1550         // Combine with comparisons against 0 by simply inverting the
1551         // continuation.
1552         Int64BinopMatcher m(value);
1553         if (m.right().Is(0)) {
1554           user = value;
1555           value = m.left().node();
1556           cont->Negate();
1557           continue;
1558         }
1559         cont->OverwriteAndNegateIfEqual(kEqual);
1560         return VisitWord64Compare(selector, value, cont);
1561       }
1562       case IrOpcode::kInt64LessThan:
1563         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1564         return VisitWord64Compare(selector, value, cont);
1565       case IrOpcode::kInt64LessThanOrEqual:
1566         cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1567         return VisitWord64Compare(selector, value, cont);
1568       case IrOpcode::kUint64LessThan:
1569         cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1570         return VisitWord64Compare(selector, value, cont);
1571       case IrOpcode::kUint64LessThanOrEqual:
1572         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1573         return VisitWord64Compare(selector, value, cont);
1574       case IrOpcode::kFloat32Equal:
1575         cont->OverwriteAndNegateIfEqual(kEqual);
1576         return VisitFloat32Compare(selector, value, cont);
1577       case IrOpcode::kFloat32LessThan:
1578         cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1579         return VisitFloat32Compare(selector, value, cont);
1580       case IrOpcode::kFloat32LessThanOrEqual:
1581         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1582         return VisitFloat32Compare(selector, value, cont);
1583       case IrOpcode::kFloat64Equal:
1584         cont->OverwriteAndNegateIfEqual(kEqual);
1585         return VisitFloat64Compare(selector, value, cont);
1586       case IrOpcode::kFloat64LessThan:
1587         cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1588         return VisitFloat64Compare(selector, value, cont);
1589       case IrOpcode::kFloat64LessThanOrEqual:
1590         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1591         return VisitFloat64Compare(selector, value, cont);
1592       case IrOpcode::kProjection:
1593         // Check if this is the overflow output projection of an
1594         // <Operation>WithOverflow node.
1595         if (ProjectionIndexOf(value->op()) == 1u) {
1596           // We cannot combine the <Operation>WithOverflow with this branch
1597           // unless the 0th projection (the use of the actual value of the
1598           // <Operation> is either nullptr, which means there's no use of the
1599           // actual value, or was already defined, which means it is scheduled
1600           // *AFTER* this branch).
1601           Node* const node = value->InputAt(0);
1602           Node* const result = NodeProperties::FindProjection(node, 0);
1603           if (result == nullptr || selector->IsDefined(result)) {
1604             switch (node->opcode()) {
1605               case IrOpcode::kInt32AddWithOverflow:
1606                 cont->OverwriteAndNegateIfEqual(kOverflow);
1607                 return VisitBinop(selector, node, kMips64Dadd, cont);
1608               case IrOpcode::kInt32SubWithOverflow:
1609                 cont->OverwriteAndNegateIfEqual(kOverflow);
1610                 return VisitBinop(selector, node, kMips64Dsub, cont);
1611               case IrOpcode::kInt64AddWithOverflow:
1612                 cont->OverwriteAndNegateIfEqual(kOverflow);
1613                 return VisitBinop(selector, node, kMips64DaddOvf, cont);
1614               case IrOpcode::kInt64SubWithOverflow:
1615                 cont->OverwriteAndNegateIfEqual(kOverflow);
1616                 return VisitBinop(selector, node, kMips64DsubOvf, cont);
1617               default:
1618                 break;
1619             }
1620           }
1621         }
1622         break;
1623       case IrOpcode::kWord32And:
1624       case IrOpcode::kWord64And:
1625         return VisitWordCompare(selector, value, kMips64Tst, cont, true);
1626       default:
1627         break;
1628     }
1629     break;
1630   }
1631 
1632   // Continuation could not be combined with a compare, emit compare against 0.
1633   EmitWordCompareZero(selector, value, cont);
1634 }
1635 
1636 
VisitBranch(Node * branch,BasicBlock * tbranch,BasicBlock * fbranch)1637 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1638                                       BasicBlock* fbranch) {
1639   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
1640   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
1641 }
1642 
1643 
VisitSwitch(Node * node,const SwitchInfo & sw)1644 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1645   Mips64OperandGenerator g(this);
1646   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1647 
1648   // Emit either ArchTableSwitch or ArchLookupSwitch.
1649   size_t table_space_cost = 10 + 2 * sw.value_range;
1650   size_t table_time_cost = 3;
1651   size_t lookup_space_cost = 2 + 2 * sw.case_count;
1652   size_t lookup_time_cost = sw.case_count;
1653   if (sw.case_count > 0 &&
1654       table_space_cost + 3 * table_time_cost <=
1655           lookup_space_cost + 3 * lookup_time_cost &&
1656       sw.min_value > std::numeric_limits<int32_t>::min()) {
1657     InstructionOperand index_operand = value_operand;
1658     if (sw.min_value) {
1659       index_operand = g.TempRegister();
1660       Emit(kMips64Sub, index_operand, value_operand,
1661            g.TempImmediate(sw.min_value));
1662     }
1663     // Generate a table lookup.
1664     return EmitTableSwitch(sw, index_operand);
1665   }
1666 
1667   // Generate a sequence of conditional jumps.
1668   return EmitLookupSwitch(sw, value_operand);
1669 }
1670 
1671 
VisitWord32Equal(Node * const node)1672 void InstructionSelector::VisitWord32Equal(Node* const node) {
1673   FlagsContinuation cont(kEqual, node);
1674   Int32BinopMatcher m(node);
1675   if (m.right().Is(0)) {
1676     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1677   }
1678 
1679   VisitWord32Compare(this, node, &cont);
1680 }
1681 
1682 
VisitInt32LessThan(Node * node)1683 void InstructionSelector::VisitInt32LessThan(Node* node) {
1684   FlagsContinuation cont(kSignedLessThan, node);
1685   VisitWord32Compare(this, node, &cont);
1686 }
1687 
1688 
VisitInt32LessThanOrEqual(Node * node)1689 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1690   FlagsContinuation cont(kSignedLessThanOrEqual, node);
1691   VisitWord32Compare(this, node, &cont);
1692 }
1693 
1694 
VisitUint32LessThan(Node * node)1695 void InstructionSelector::VisitUint32LessThan(Node* node) {
1696   FlagsContinuation cont(kUnsignedLessThan, node);
1697   VisitWord32Compare(this, node, &cont);
1698 }
1699 
1700 
VisitUint32LessThanOrEqual(Node * node)1701 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1702   FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1703   VisitWord32Compare(this, node, &cont);
1704 }
1705 
1706 
VisitInt32AddWithOverflow(Node * node)1707 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1708   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1709     FlagsContinuation cont(kOverflow, ovf);
1710     return VisitBinop(this, node, kMips64Dadd, &cont);
1711   }
1712   FlagsContinuation cont;
1713   VisitBinop(this, node, kMips64Dadd, &cont);
1714 }
1715 
1716 
VisitInt32SubWithOverflow(Node * node)1717 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1718   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1719     FlagsContinuation cont(kOverflow, ovf);
1720     return VisitBinop(this, node, kMips64Dsub, &cont);
1721   }
1722   FlagsContinuation cont;
1723   VisitBinop(this, node, kMips64Dsub, &cont);
1724 }
1725 
1726 
VisitInt64AddWithOverflow(Node * node)1727 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
1728   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1729     FlagsContinuation cont(kOverflow, ovf);
1730     return VisitBinop(this, node, kMips64DaddOvf, &cont);
1731   }
1732   FlagsContinuation cont;
1733   VisitBinop(this, node, kMips64DaddOvf, &cont);
1734 }
1735 
1736 
VisitInt64SubWithOverflow(Node * node)1737 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
1738   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1739     FlagsContinuation cont(kOverflow, ovf);
1740     return VisitBinop(this, node, kMips64DsubOvf, &cont);
1741   }
1742   FlagsContinuation cont;
1743   VisitBinop(this, node, kMips64DsubOvf, &cont);
1744 }
1745 
1746 
VisitWord64Equal(Node * const node)1747 void InstructionSelector::VisitWord64Equal(Node* const node) {
1748   FlagsContinuation cont(kEqual, node);
1749   Int64BinopMatcher m(node);
1750   if (m.right().Is(0)) {
1751     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1752   }
1753 
1754   VisitWord64Compare(this, node, &cont);
1755 }
1756 
1757 
VisitInt64LessThan(Node * node)1758 void InstructionSelector::VisitInt64LessThan(Node* node) {
1759   FlagsContinuation cont(kSignedLessThan, node);
1760   VisitWord64Compare(this, node, &cont);
1761 }
1762 
1763 
VisitInt64LessThanOrEqual(Node * node)1764 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
1765   FlagsContinuation cont(kSignedLessThanOrEqual, node);
1766   VisitWord64Compare(this, node, &cont);
1767 }
1768 
1769 
VisitUint64LessThan(Node * node)1770 void InstructionSelector::VisitUint64LessThan(Node* node) {
1771   FlagsContinuation cont(kUnsignedLessThan, node);
1772   VisitWord64Compare(this, node, &cont);
1773 }
1774 
1775 
VisitUint64LessThanOrEqual(Node * node)1776 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
1777   FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1778   VisitWord64Compare(this, node, &cont);
1779 }
1780 
1781 
VisitFloat32Equal(Node * node)1782 void InstructionSelector::VisitFloat32Equal(Node* node) {
1783   FlagsContinuation cont(kEqual, node);
1784   VisitFloat32Compare(this, node, &cont);
1785 }
1786 
1787 
VisitFloat32LessThan(Node * node)1788 void InstructionSelector::VisitFloat32LessThan(Node* node) {
1789   FlagsContinuation cont(kUnsignedLessThan, node);
1790   VisitFloat32Compare(this, node, &cont);
1791 }
1792 
1793 
VisitFloat32LessThanOrEqual(Node * node)1794 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
1795   FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1796   VisitFloat32Compare(this, node, &cont);
1797 }
1798 
1799 
VisitFloat64Equal(Node * node)1800 void InstructionSelector::VisitFloat64Equal(Node* node) {
1801   FlagsContinuation cont(kEqual, node);
1802   VisitFloat64Compare(this, node, &cont);
1803 }
1804 
1805 
VisitFloat64LessThan(Node * node)1806 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1807   FlagsContinuation cont(kUnsignedLessThan, node);
1808   VisitFloat64Compare(this, node, &cont);
1809 }
1810 
1811 
VisitFloat64LessThanOrEqual(Node * node)1812 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1813   FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1814   VisitFloat64Compare(this, node, &cont);
1815 }
1816 
1817 
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  // Single-input, single-output operation; delegated to the generic
  // register-to-register emit helper.
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
1821 
1822 
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  // Single-input, single-output operation; delegated to the generic
  // register-to-register emit helper.
  VisitRR(this, kMips64Float64ExtractHighWord32, node);
}
1826 
1827 
VisitFloat64InsertLowWord32(Node * node)1828 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1829   Mips64OperandGenerator g(this);
1830   Node* left = node->InputAt(0);
1831   Node* right = node->InputAt(1);
1832   Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
1833        g.UseRegister(left), g.UseRegister(right));
1834 }
1835 
1836 
VisitFloat64InsertHighWord32(Node * node)1837 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1838   Mips64OperandGenerator g(this);
1839   Node* left = node->InputAt(0);
1840   Node* right = node->InputAt(1);
1841   Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
1842        g.UseRegister(left), g.UseRegister(right));
1843 }
1844 
1845 
1846 // static
1847 MachineOperatorBuilder::Flags
SupportedMachineOperatorFlags()1848 InstructionSelector::SupportedMachineOperatorFlags() {
1849   return MachineOperatorBuilder::kWord32ShiftIsSafe |
1850          MachineOperatorBuilder::kInt32DivIsSafe |
1851          MachineOperatorBuilder::kUint32DivIsSafe |
1852          MachineOperatorBuilder::kFloat64Min |
1853          MachineOperatorBuilder::kFloat64Max |
1854          MachineOperatorBuilder::kFloat32Min |
1855          MachineOperatorBuilder::kFloat32Max |
1856          MachineOperatorBuilder::kFloat64RoundDown |
1857          MachineOperatorBuilder::kFloat32RoundDown |
1858          MachineOperatorBuilder::kFloat64RoundUp |
1859          MachineOperatorBuilder::kFloat32RoundUp |
1860          MachineOperatorBuilder::kFloat64RoundTruncate |
1861          MachineOperatorBuilder::kFloat32RoundTruncate |
1862          MachineOperatorBuilder::kFloat64RoundTiesEven |
1863          MachineOperatorBuilder::kFloat32RoundTiesEven;
1864 }
1865 
1866 }  // namespace compiler
1867 }  // namespace internal
1868 }  // namespace v8
1869