1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/adapters.h"
6 #include "src/base/bits.h"
7 #include "src/compiler/instruction-selector-impl.h"
8 #include "src/compiler/node-matchers.h"
9 #include "src/compiler/node-properties.h"
10 
11 namespace v8 {
12 namespace internal {
13 namespace compiler {
14 
15 #define TRACE_UNIMPL() \
16   PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
17 
18 #define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
19 
20 
21 // Adds Mips-specific methods for generating InstructionOperands.
// Adds Mips-specific methods for generating InstructionOperands.
class MipsOperandGenerator final : public OperandGenerator {
 public:
  explicit MipsOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns an immediate operand when |node| is a constant that fits the
  // immediate field of |opcode|; otherwise assigns a register.
  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    // Float constants are compared by bit pattern, so only +0.0 (all bits
    // zero) qualifies; -0.0 does not.
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // On MIPS32 only 32-bit integer constants exist.
  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant);
  }

  // Returns the value of an Int32Constant, widened to int64_t.
  int64_t GetIntegerConstantValue(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kInt32Constant);
    return OpParameter<int32_t>(node);
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  // Returns the value of a Float32/Float64 constant as a double.
  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node);
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node);
  }

  // Returns true when the constant |node| fits the immediate encoding of
  // |opcode|: 5-bit shift amounts, 16-bit immediates for ALU operations,
  // and arbitrary 32-bit offsets for memory operations (see below).
  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    Int32Matcher m(node);
    if (!m.HasValue()) return false;
    int32_t value = m.Value();
    switch (ArchOpcodeField::decode(opcode)) {
      case kMipsShl:
      case kMipsSar:
      case kMipsShr:
        return is_uint5(value);
      case kMipsAdd:
      case kMipsAnd:
      case kMipsOr:
      case kMipsTst:
      case kMipsSub:
      case kMipsXor:
        return is_uint16(value);
      case kMipsLb:
      case kMipsLbu:
      case kMipsSb:
      case kMipsLh:
      case kMipsLhu:
      case kMipsSh:
      case kMipsLw:
      case kMipsSw:
      case kMipsLwc1:
      case kMipsSwc1:
      case kMipsLdc1:
      case kMipsSdc1:
      case kCheckedLoadInt8:
      case kCheckedLoadUint8:
      case kCheckedLoadInt16:
      case kCheckedLoadUint16:
      case kCheckedLoadWord32:
      case kCheckedStoreWord8:
      case kCheckedStoreWord16:
      case kCheckedStoreWord32:
      case kCheckedLoadFloat32:
      case kCheckedLoadFloat64:
      case kCheckedStoreFloat32:
      case kCheckedStoreFloat64:
        // true even for 32b values, offsets > 16b
        // are handled in assembler-mips.cc
        return is_int32(value);
      default:
        return is_int16(value);
    }
  }

 private:
  // Unimplemented on MIPS: traces and reports no fit.
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};
121 
122 
VisitRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)123 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
124                      Node* node) {
125   MipsOperandGenerator g(selector);
126   selector->Emit(opcode, g.DefineAsRegister(node),
127                  g.UseRegister(node->InputAt(0)),
128                  g.UseRegister(node->InputAt(1)));
129 }
130 
131 
VisitRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)132 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
133                     Node* node) {
134   MipsOperandGenerator g(selector);
135   selector->Emit(opcode, g.DefineAsRegister(node),
136                  g.UseRegister(node->InputAt(0)));
137 }
138 
139 
VisitRRO(InstructionSelector * selector,ArchOpcode opcode,Node * node)140 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
141                      Node* node) {
142   MipsOperandGenerator g(selector);
143   selector->Emit(opcode, g.DefineAsRegister(node),
144                  g.UseRegister(node->InputAt(0)),
145                  g.UseOperand(node->InputAt(1), opcode));
146 }
147 
TryMatchImmediate(InstructionSelector * selector,InstructionCode * opcode_return,Node * node,size_t * input_count_return,InstructionOperand * inputs)148 bool TryMatchImmediate(InstructionSelector* selector,
149                        InstructionCode* opcode_return, Node* node,
150                        size_t* input_count_return, InstructionOperand* inputs) {
151   MipsOperandGenerator g(selector);
152   if (g.CanBeImmediate(node, *opcode_return)) {
153     *opcode_return |= AddressingModeField::encode(kMode_MRI);
154     inputs[0] = g.UseImmediate(node);
155     *input_count_return = 1;
156     return true;
157   }
158   return false;
159 }
160 
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode,bool has_reverse_opcode,InstructionCode reverse_opcode,FlagsContinuation * cont)161 static void VisitBinop(InstructionSelector* selector, Node* node,
162                        InstructionCode opcode, bool has_reverse_opcode,
163                        InstructionCode reverse_opcode,
164                        FlagsContinuation* cont) {
165   MipsOperandGenerator g(selector);
166   Int32BinopMatcher m(node);
167   InstructionOperand inputs[4];
168   size_t input_count = 0;
169   InstructionOperand outputs[2];
170   size_t output_count = 0;
171 
172   if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
173                         &inputs[1])) {
174     inputs[0] = g.UseRegister(m.left().node());
175     input_count++;
176   }
177   if (has_reverse_opcode &&
178       TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
179                         &input_count, &inputs[1])) {
180     inputs[0] = g.UseRegister(m.right().node());
181     opcode = reverse_opcode;
182     input_count++;
183   } else {
184     inputs[input_count++] = g.UseRegister(m.left().node());
185     inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
186   }
187 
188   if (cont->IsBranch()) {
189     inputs[input_count++] = g.Label(cont->true_block());
190     inputs[input_count++] = g.Label(cont->false_block());
191   }
192 
193   if (cont->IsDeoptimize()) {
194     // If we can deoptimize as a result of the binop, we need to make sure that
195     // the deopt inputs are not overwritten by the binop result. One way
196     // to achieve that is to declare the output register as same-as-first.
197     outputs[output_count++] = g.DefineSameAsFirst(node);
198   } else {
199     outputs[output_count++] = g.DefineAsRegister(node);
200   }
201   if (cont->IsSet()) {
202     outputs[output_count++] = g.DefineAsRegister(cont->result());
203   }
204 
205   DCHECK_NE(0u, input_count);
206   DCHECK_NE(0u, output_count);
207   DCHECK_GE(arraysize(inputs), input_count);
208   DCHECK_GE(arraysize(outputs), output_count);
209 
210   opcode = cont->Encode(opcode);
211   if (cont->IsDeoptimize()) {
212     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
213                              cont->reason(), cont->frame_state());
214   } else {
215     selector->Emit(opcode, output_count, outputs, input_count, inputs);
216   }
217 }
218 
// Convenience overload: binop with optional reverse opcode and no flags
// continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}
225 
// Convenience overload: binop with a flags continuation and no reverse
// opcode.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}
230 
// Convenience overload: plain binop, no reverse opcode, no continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}
235 
236 
// Selects the MIPS load instruction for a Load node based on its machine
// representation. Uses a base-register + immediate-offset addressing mode
// when the index is a suitable constant; otherwise materializes the address
// with an explicit add and loads from offset 0.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsLwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsLdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsLw;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      // 64-bit and SIMD loads do not occur on MIPS32.
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Compute base + index into a temp, then load with a zero offset.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
283 
// Protected (trap-handler) loads are not implemented on MIPS yet.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
288 
// Selects the MIPS store instruction for a Store node. Stores that require a
// GC write barrier are emitted as kArchStoreWithWriteBarrier; plain stores
// pick an opcode from the representation and use base + immediate offset
// when possible, otherwise an explicit address computation.
void InstructionSelector::VisitStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    // All operands get unique registers so the write-barrier stub's temps
    // cannot alias them.
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMipsSwc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMipsSdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMipsSb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMipsSh;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kMipsSw;
        break;
      case MachineRepresentation::kWord64:   // Fall through.
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        // 64-bit and SIMD stores do not occur on MIPS32.
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      // Compute base + index into a temp, then store with a zero offset.
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}
370 
371 
// Selects instructions for Word32And, matching the MIPS32R2 bit-field
// instructions Ext (extract) and Ins (insert) where the mask pattern allows,
// and falling back to a plain And otherwise.
void InstructionSelector::VisitWord32And(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1f;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMipsExt, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation32(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
      Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(shift));
      return;
    }
  }
  // And is commutative, so the same opcode serves as its own reverse.
  VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
}
419 
420 
// Or is commutative, so the same opcode serves as its own reverse.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMipsOr, true, kMipsOr);
}
424 
425 
// Selects instructions for Word32Xor, fusing Xor(Or(a, b), -1) into a single
// Nor and Xor(x, -1) into Nor(x, zero); otherwise emits a plain Xor.
void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      // ~(a | b) maps directly onto the Nor instruction.
      MipsOperandGenerator g(this);
      Emit(kMipsNor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    MipsOperandGenerator g(this);
    Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  // Xor is commutative, so the same opcode serves as its own reverse.
  VisitBinop(this, node, kMipsXor, true, kMipsXor);
}
448 
449 
// Selects instructions for Word32Shl. A preceding And with a contiguous
// mask that the shift would push off the top of the register is redundant,
// so only the shift is emitted in that case.
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    MipsOperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMipsShl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMipsShl, node);
}
479 
480 
// Selects instructions for Word32Shr, matching Shr(And(x, mask), imm) onto a
// single Ext bit-field extract when the mask-then-shift isolates a
// contiguous bit range.
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    // Int32 shifts use `value % 32`.
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        MipsOperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMipsExt, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMipsShr, node);
}
504 
505 
// Selects instructions for Word32Sar, matching the shl-then-sar sign
// extension idioms (16/16 -> Seh sign-extend halfword, 24/24 -> Seb
// sign-extend byte).
void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      MipsOperandGenerator g(this);
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
      if ((sar == shl) && (sar == 16)) {
        Emit(kMipsSeh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kMipsSeb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      }
    }
  }
  VisitRRO(this, kMipsSar, node);
}
527 
VisitInt32PairBinop(InstructionSelector * selector,InstructionCode pair_opcode,InstructionCode single_opcode,Node * node)528 static void VisitInt32PairBinop(InstructionSelector* selector,
529                                 InstructionCode pair_opcode,
530                                 InstructionCode single_opcode, Node* node) {
531   MipsOperandGenerator g(selector);
532 
533   Node* projection1 = NodeProperties::FindProjection(node, 1);
534 
535   if (projection1) {
536     // We use UseUniqueRegister here to avoid register sharing with the output
537     // register.
538     InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
539                                    g.UseUniqueRegister(node->InputAt(1)),
540                                    g.UseUniqueRegister(node->InputAt(2)),
541                                    g.UseUniqueRegister(node->InputAt(3))};
542 
543     InstructionOperand outputs[] = {
544         g.DefineAsRegister(node),
545         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
546     selector->Emit(pair_opcode, 2, outputs, 4, inputs);
547   } else {
548     // The high word of the result is not used, so we emit the standard 32 bit
549     // instruction.
550     selector->Emit(single_opcode, g.DefineSameAsFirst(node),
551                    g.UseRegister(node->InputAt(0)),
552                    g.UseRegister(node->InputAt(2)));
553   }
554 }
555 
// 64-bit add on MIPS32: pair instruction, or plain Add if high word unused.
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitInt32PairBinop(this, kMipsAddPair, kMipsAdd, node);
}
559 
// 64-bit sub on MIPS32: pair instruction, or plain Sub if high word unused.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitInt32PairBinop(this, kMipsSubPair, kMipsSub, node);
}
563 
// 64-bit mul on MIPS32: pair instruction, or plain Mul if high word unused.
void InstructionSelector::VisitInt32PairMul(Node* node) {
  VisitInt32PairBinop(this, kMipsMulPair, kMipsMul, node);
}
567 
568 // Shared routine for multiple shift operations.
// Shared routine for multiple shift operations.
// Emits a 64-bit (register-pair) shift. The shift amount is an immediate
// when constant; the high output either goes to the projection or, if
// unused, to a scratch register the pair instruction still requires.
static void VisitWord32PairShift(InstructionSelector* selector,
                                 InstructionCode opcode, Node* node) {
  MipsOperandGenerator g(selector);
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  // We use UseUniqueRegister here to avoid register sharing with the output
  // register.
  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    // High half is unused; give the instruction a temp to write into.
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
602 
// 64-bit shift left on MIPS32 register pairs.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitWord32PairShift(this, kMipsShlPair, node);
}
606 
// 64-bit logical shift right on MIPS32 register pairs.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitWord32PairShift(this, kMipsShrPair, node);
}
610 
// 64-bit arithmetic shift right on MIPS32 register pairs.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitWord32PairShift(this, kMipsSarPair, node);
}
614 
// Rotate right; the rotate amount may be encoded as an immediate.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMipsRor, node);
}
618 
619 
// Count leading zeros.
void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMipsClz, node);
}
623 
624 
// Word32ReverseBits is not supported by this selector.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
626 
// No 64-bit byte swap on MIPS32.
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
628 
// 32-bit byte swap.
void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsByteSwap32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
634 
// Count trailing zeros.
void InstructionSelector::VisitWord32Ctz(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
639 
640 
// Population count.
void InstructionSelector::VisitWord32Popcnt(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
645 
646 
// Selects instructions for Int32Add, fusing an add of a shifted operand into
// a single Lsa (left-shift-add) instruction when one side is a Word32Shl by
// a constant; otherwise emits a plain Add.
void InstructionSelector::VisitInt32Add(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Lsa for (left + (left_of_right << imm)).
  if (m.right().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Lsa for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
           g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  // Add is commutative, so the same opcode serves as its own reverse.
  VisitBinop(this, node, kMipsAdd, true, kMipsAdd);
}
677 
678 
// Sub is not commutative, so no reverse opcode is offered.
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMipsSub);
}
682 
683 
// Selects instructions for Int32Mul, strength-reducing multiplication by a
// positive constant: 2^k -> shift, 2^k + 1 -> Lsa, 2^k - 1 -> shift then
// subtract; otherwise emits a plain Mul.
void InstructionSelector::VisitInt32Mul(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value)) {
      // x * 2^k == x << k.
      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      // x * (2^k + 1) == (x << k) + x, a single Lsa.
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      // x * (2^k - 1) == (x << k) - x.
      InstructionOperand temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitRRR(this, kMipsMul, node);
}
713 
714 
// High 32 bits of a signed 32x32->64 multiply.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMipsMulHigh, node);
}
718 
719 
// High 32 bits of an unsigned 32x32->64 multiply.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
}
725 
726 
// Signed division; the result register is tied to the first input
// (DefineSameAsFirst).
void InstructionSelector::VisitInt32Div(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
733 
734 
// Unsigned division; the result register is tied to the first input
// (DefineSameAsFirst).
void InstructionSelector::VisitUint32Div(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
741 
742 
// Signed remainder.
void InstructionSelector::VisitInt32Mod(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
749 
750 
// Unsigned remainder.
void InstructionSelector::VisitUint32Mod(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
757 
758 
// float32 -> float64 conversion (cvt.d.s).
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDS, node);
}
762 
763 
// int32 -> float32 conversion (cvt.s.w).
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSW, node);
}
767 
768 
// uint32 -> float32 conversion.
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSUw, node);
}
772 
773 
// int32 -> float64 conversion (cvt.d.w).
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDW, node);
}
777 
778 
// uint32 -> float64 conversion.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDUw, node);
}
782 
783 
VisitTruncateFloat32ToInt32(Node * node)784 void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
785   VisitRR(this, kMipsTruncWS, node);
786 }
787 
788 
VisitTruncateFloat32ToUint32(Node * node)789 void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
790   VisitRR(this, kMipsTruncUwS, node);
791 }
792 
793 
// Lowers ChangeFloat64ToInt32, fusing an immediately preceding rounding
// operation into a single round-and-convert instruction when the rounding
// node has no other uses (CanCover).
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMipsFloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMipsCeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMipsRoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMipsTruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float32Round##OP))
        // and convert straight from the rounded float32 operand.
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMipsFloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMipsCeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMipsRoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // No rounding op to fuse: truncate the float32 input (which is
            // value->InputAt(0), i.e. `next`) directly, skipping the widen.
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMipsTruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  // Generic case: truncate the float64 input toward zero.
  VisitRR(this, kMipsTruncWD, node);
}
856 
857 
// Both the checked change and the explicit truncation of float64 -> uint32
// lower to the same truncating convert instruction.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}
865 
VisitTruncateFloat64ToFloat32(Node * node)866 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
867   MipsOperandGenerator g(this);
868   Node* value = node->InputAt(0);
869   // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
870   // instruction.
871   if (CanCover(node, value) &&
872       value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
873     Emit(kMipsCvtSW, g.DefineAsRegister(node),
874          g.UseRegister(value->InputAt(0)));
875     return;
876   }
877   VisitRR(this, kMipsCvtSD, node);
878 }
879 
// JavaScript-style truncation (modulo 2^32 semantics) uses the generic
// architecture-independent opcode.
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMipsTruncWD, node);
}

// Bitcast float32 -> int32 reuses the "extract low word" instruction; a
// single-precision value occupies the low word of an FPU register.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMipsFloat64ExtractLowWord32, node);
}


// Bitcast int32 -> float32 via "insert low word". The extra inline-immediate
// 0 operand is passed before the GP-register input; presumably it stands in
// for the (unused) float64 source operand of the insert — TODO confirm
// against the code generator for kMipsFloat64InsertLowWord32.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}
899 
900 
// Lowers Float32Add, fusing a covered Float32Mul operand into a fused
// multiply-add where the architecture variant provides one:
//   r2: madd.s  d, r, a, b  computes r + a * b (fresh destination)
//   r6: maddf.s d, a, b     accumulates into d, hence DefineSameAsFirst.
void InstructionSelector::VisitFloat32Add(Node* node) {
  MipsOperandGenerator g(this);
  Float32BinopMatcher m(node);
  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
    // For Add.S(Mul.S(x, y), z):
    Float32BinopMatcher mleft(m.left().node());
    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
      Emit(kMipsMaddS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(z, x, y).
      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
    // For Add.S(x, Mul.S(y, z)):
    Float32BinopMatcher mright(m.right().node());
    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(x, y, z).
      Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(x, y, z).
      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  // No fusable multiply: plain single-precision add.
  VisitRRR(this, kMipsAddS, node);
}
936 
937 
// Double-precision counterpart of VisitFloat32Add: fuse a covered
// Float64Mul into madd.d (r2) or maddf.d (r6, accumulating destination).
void InstructionSelector::VisitFloat64Add(Node* node) {
  MipsOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
    // For Add.D(Mul.D(x, y), z):
    Float64BinopMatcher mleft(m.left().node());
    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
      Emit(kMipsMaddD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(z, x, y).
      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    // For Add.D(x, Mul.D(y, z)):
    Float64BinopMatcher mright(m.right().node());
    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(x, y, z).
      Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(x, y, z).
      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  // No fusable multiply: plain double-precision add.
  VisitRRR(this, kMipsAddD, node);
}
973 
974 
// Lowers Float32Sub with fused multiply-subtract where available. Note the
// asymmetry: r2 msub.s only helps when the multiply is the LEFT operand
// (a*b - c), while r6 msubf.s only helps when it is the RIGHT operand
// (c - a*b, accumulating into the destination).
void InstructionSelector::VisitFloat32Sub(Node* node) {
  MipsOperandGenerator g(this);
  Float32BinopMatcher m(node);
  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
    if (IsMipsArchVariant(kMips32r2)) {
      // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
    if (IsMipsArchVariant(kMips32r6)) {
      // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
      Float32BinopMatcher mright(m.right().node());
      Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubS, node);
}
999 
// Double-precision counterpart of VisitFloat32Sub; same r2/r6 asymmetry.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  MipsOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
    if (IsMipsArchVariant(kMips32r2)) {
      // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    if (IsMipsArchVariant(kMips32r6)) {
      // For Sub.D(x,Mul.D(y,z)) select Msubf.D(x, y, z).
      Float64BinopMatcher mright(m.right().node());
      Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubD, node);
}
1024 
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMipsMulS, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMipsMulD, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMipsDivS, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMipsDivD, node);
}


// Float64Mod has no MIPS instruction; it is lowered to an out-of-line call
// (MarkAsCall) using the fixed FP argument registers f12/f14 and returning
// in f0.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}
1050 
// Min/Max visitors: two register inputs, fresh output register.

void InstructionSelector::VisitFloat32Max(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat32Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat32Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
1074 
1075 
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMipsAbsS, node);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMipsAbsD, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtS, node);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtD, node);
}
1093 
1094 
// Rounding visitors: each maps to a dedicated MIPS rounding opcode.

void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMipsFloat32RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMipsFloat64RoundDown, node);
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMipsFloat32RoundUp, node);
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMipsFloat64RoundUp, node);
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat32RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat64RoundTruncate, node);
}


// Round-ties-away is not selectable on this target; the front end is not
// expected to produce it here.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat32RoundTiesEven, node);
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat64RoundTiesEven, node);
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMipsNegS, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMipsNegD, node);
}
1146 
// IEEE-754 math operations (pow, atan2, ...) are emitted as out-of-line
// calls (MarkAsCall). Binops pass arguments in f2/f4; unops in f12. Both
// return the result in f0.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  MipsOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
       g.UseFixed(node->InputAt(1), f4))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  MipsOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
      ->MarkAsCall();
}
1161 
EmitPrepareArguments(ZoneVector<PushParameter> * arguments,const CallDescriptor * descriptor,Node * node)1162 void InstructionSelector::EmitPrepareArguments(
1163     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
1164     Node* node) {
1165   MipsOperandGenerator g(this);
1166 
1167   // Prepare for C function call.
1168   if (descriptor->IsCFunctionCall()) {
1169     Emit(kArchPrepareCallCFunction |
1170              MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
1171          0, nullptr, 0, nullptr);
1172 
1173     // Poke any stack arguments.
1174     int slot = kCArgSlotCount;
1175     for (PushParameter input : (*arguments)) {
1176       if (input.node()) {
1177         Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
1178              g.TempImmediate(slot << kPointerSizeLog2));
1179         ++slot;
1180       }
1181     }
1182   } else {
1183     // Possibly align stack here for functions.
1184     int push_count = static_cast<int>(descriptor->StackParameterCount());
1185     if (push_count > 0) {
1186       Emit(kMipsStackClaim, g.NoOutput(),
1187            g.TempImmediate(push_count << kPointerSizeLog2));
1188     }
1189     for (size_t n = 0; n < arguments->size(); ++n) {
1190       PushParameter input = (*arguments)[n];
1191       if (input.node()) {
1192         Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
1193              g.TempImmediate(n << kPointerSizeLog2));
1194       }
1195     }
1196   }
1197 }
1198 
1199 
// Tail-call target addresses must be materialized in a register on MIPS.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

// Number of temporary registers reserved for tail calls from JS functions.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1203 
// Lowers an unaligned load to the matching kMipsUl* opcode. If the index
// fits as an immediate, a single [base + imm] load is emitted; otherwise
// base + index is computed into a temp register and loaded with offset 0.
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  UnalignedLoadRepresentation load_rep =
      UnalignedLoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Byte-sized accesses are never unaligned.
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUlw;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kMipsUlwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUldc1;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
1251 
// Store counterpart of VisitUnalignedLoad: picks the kMipsUs* opcode for the
// representation and uses the same immediate-or-computed-address scheme.
// Stored zeros are encoded as the immediate zero register operand.
void InstructionSelector::VisitUnalignedStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());

  // TODO(mips): I guess this could be done in a better way.
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsUswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUsdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Byte-sized accesses are never unaligned.
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = kMipsUsh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUsw;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}
1302 
VisitCheckedLoad(Node * node)1303 void InstructionSelector::VisitCheckedLoad(Node* node) {
1304   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
1305   MipsOperandGenerator g(this);
1306   Node* const buffer = node->InputAt(0);
1307   Node* const offset = node->InputAt(1);
1308   Node* const length = node->InputAt(2);
1309   ArchOpcode opcode = kArchNop;
1310   switch (load_rep.representation()) {
1311     case MachineRepresentation::kWord8:
1312       opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
1313       break;
1314     case MachineRepresentation::kWord16:
1315       opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
1316       break;
1317     case MachineRepresentation::kWord32:
1318       opcode = kCheckedLoadWord32;
1319       break;
1320     case MachineRepresentation::kFloat32:
1321       opcode = kCheckedLoadFloat32;
1322       break;
1323     case MachineRepresentation::kFloat64:
1324       opcode = kCheckedLoadFloat64;
1325       break;
1326     case MachineRepresentation::kBit:      // Fall through.
1327     case MachineRepresentation::kTaggedSigned:   // Fall through.
1328     case MachineRepresentation::kTaggedPointer:  // Fall through.
1329     case MachineRepresentation::kTagged:   // Fall through.
1330     case MachineRepresentation::kWord64:   // Fall through.
1331     case MachineRepresentation::kSimd128:  // Fall through.
1332     case MachineRepresentation::kNone:
1333       UNREACHABLE();
1334       return;
1335   }
1336   InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
1337                                           ? g.UseImmediate(offset)
1338                                           : g.UseRegister(offset);
1339 
1340   InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
1341                                           ? g.CanBeImmediate(length, opcode)
1342                                                 ? g.UseImmediate(length)
1343                                                 : g.UseRegister(length)
1344                                           : g.UseRegister(length);
1345 
1346   Emit(opcode | AddressingModeField::encode(kMode_MRI),
1347        g.DefineAsRegister(node), offset_operand, length_operand,
1348        g.UseRegister(buffer));
1349 }
1350 
1351 
// Lowers a bounds-checked store. Operand selection mirrors VisitCheckedLoad:
// the nested conditional allows the length to be an immediate only when the
// offset is NOT an immediate; otherwise both go in registers.
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);

  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
                                          ? g.CanBeImmediate(length, opcode)
                                                ? g.UseImmediate(length)
                                                : g.UseRegister(length)
                                          : g.UseRegister(length);

  // Zero values are stored via the immediate-zero operand.
  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
       offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
       g.UseRegister(buffer));
}
1394 
1395 
1396 namespace {
1397 // Shared routine for multiple compare operations.
// Shared routine for multiple compare operations. Emits the compare with
// the continuation encoded into the opcode; the continuation decides whether
// the result feeds a branch (two label operands), a deoptimization point, or
// a materialized boolean (one output register).
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}
1414 
1415 
1416 // Shared routine for multiple float32 compare operations.
VisitFloat32Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1417 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1418                          FlagsContinuation* cont) {
1419   MipsOperandGenerator g(selector);
1420   Float32BinopMatcher m(node);
1421   InstructionOperand lhs, rhs;
1422 
1423   lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1424                           : g.UseRegister(m.left().node());
1425   rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1426                            : g.UseRegister(m.right().node());
1427   VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
1428 }
1429 
1430 
1431 // Shared routine for multiple float64 compare operations.
// Shared routine for multiple float64 compare operations. As in the float32
// variant, a zero operand is encoded as an immediate.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Float64BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
}
1444 
1445 
1446 // Shared routine for multiple word compare operations.
// Shared routine for multiple word compare operations. Tries to encode an
// immediate on either side of the comparison. kMipsTst always accepts an
// immediate; for other opcodes the continuation condition decides whether
// the immediate form is usable (equality comparisons only take an immediate
// in the materialize-to-register case). When the immediate is on the left of
// a non-commutative comparison, the condition is commuted first.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  MipsOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kMipsTst) {
      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            // Branch/deopt continuations need both sides in registers for
            // (in)equality.
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
      }
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    if (!commutative) cont->Commute();
    if (opcode == kMipsTst) {
      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseImmediate(left), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseRegister(left), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
      }
    }
  } else {
    // Neither side fits as an immediate: register-register compare.
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}
1517 
1518 
// Convenience overload: generic (non-commutative) word compare with kMipsCmp.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMipsCmp, cont, false);
}
1523 
// Shared routine for word comparisons against zero.  Tries to fuse the
// operation that produces |value| directly into the continuation |cont|
// (branch, deoptimize, or materialized boolean); if no fusion is possible it
// emits an explicit compare of |value| against the immediate zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal &&
         selector->CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    // (x == 0) tested against zero: strip the equality node and negate the
    // continuation instead.  May strip several nested equalities.
    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      // Integer comparisons: overwrite the continuation's condition with the
      // comparison's own condition and emit a fused word compare.
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      // Floating-point comparisons: note they are encoded with the unsigned
      // condition codes, matching the setters below (e.g. VisitFloat64LessThan).
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsAddOvf, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsSubOvf, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsMulOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
        // (x & mask) against zero fuses into a test instruction (commutative).
        return VisitWordCompare(selector, value, kMipsTst, cont, true);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  MipsOperandGenerator g(selector);
  InstructionCode const opcode = cont->Encode(kMipsCmp);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
                             g.TempImmediate(0), cont->reason(),
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                   g.TempImmediate(0));
  }
}
1625 
1626 }  // namespace
1627 
// Emits the comparison feeding a two-way branch: branch to |tbranch| when the
// branch condition (input 0) is non-zero, otherwise to |fbranch|.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
1633 
VisitDeoptimizeIf(Node * node)1634 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
1635   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
1636       kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
1637   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
1638 }
1639 
VisitDeoptimizeUnless(Node * node)1640 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
1641   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
1642       kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
1643   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
1644 }
1645 
VisitSwitch(Node * node,const SwitchInfo & sw)1646 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1647   MipsOperandGenerator g(this);
1648   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1649 
1650   // Emit either ArchTableSwitch or ArchLookupSwitch.
1651   size_t table_space_cost = 9 + sw.value_range;
1652   size_t table_time_cost = 3;
1653   size_t lookup_space_cost = 2 + 2 * sw.case_count;
1654   size_t lookup_time_cost = sw.case_count;
1655   if (sw.case_count > 0 &&
1656       table_space_cost + 3 * table_time_cost <=
1657           lookup_space_cost + 3 * lookup_time_cost &&
1658       sw.min_value > std::numeric_limits<int32_t>::min()) {
1659     InstructionOperand index_operand = value_operand;
1660     if (sw.min_value) {
1661       index_operand = g.TempRegister();
1662       Emit(kMipsSub, index_operand, value_operand,
1663            g.TempImmediate(sw.min_value));
1664     }
1665     // Generate a table lookup.
1666     return EmitTableSwitch(sw, index_operand);
1667   }
1668 
1669   // Generate a sequence of conditional jumps.
1670   return EmitLookupSwitch(sw, value_operand);
1671 }
1672 
1673 
VisitWord32Equal(Node * const node)1674 void InstructionSelector::VisitWord32Equal(Node* const node) {
1675   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1676   Int32BinopMatcher m(node);
1677   if (m.right().Is(0)) {
1678     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1679   }
1680   VisitWordCompare(this, node, &cont);
1681 }
1682 
1683 
// Materializes the signed comparison (a < b) as a boolean result.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}
1688 
1689 
// Materializes the signed comparison (a <= b) as a boolean result.
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}
1695 
1696 
// Materializes the unsigned comparison (a < b) as a boolean result.
void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}
1701 
1702 
// Materializes the unsigned comparison (a <= b) as a boolean result.
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}
1708 
1709 
VisitInt32AddWithOverflow(Node * node)1710 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1711   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1712     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1713     return VisitBinop(this, node, kMipsAddOvf, &cont);
1714   }
1715   FlagsContinuation cont;
1716   VisitBinop(this, node, kMipsAddOvf, &cont);
1717 }
1718 
1719 
VisitInt32SubWithOverflow(Node * node)1720 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1721   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1722     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1723     return VisitBinop(this, node, kMipsSubOvf, &cont);
1724   }
1725   FlagsContinuation cont;
1726   VisitBinop(this, node, kMipsSubOvf, &cont);
1727 }
1728 
VisitInt32MulWithOverflow(Node * node)1729 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
1730   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1731     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1732     return VisitBinop(this, node, kMipsMulOvf, &cont);
1733   }
1734   FlagsContinuation cont;
1735   VisitBinop(this, node, kMipsMulOvf, &cont);
1736 }
1737 
// Materializes the float32 comparison (a == b) as a boolean result.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
1742 
1743 
// Materializes the float32 comparison (a < b); floating-point comparisons are
// encoded with the unsigned condition codes.
void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}
1748 
1749 
// Materializes the float32 comparison (a <= b); floating-point comparisons are
// encoded with the unsigned condition codes.
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
1755 
1756 
// Materializes the float64 comparison (a == b) as a boolean result.
void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
1761 
1762 
// Materializes the float64 comparison (a < b); floating-point comparisons are
// encoded with the unsigned condition codes.
void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}
1767 
1768 
// Materializes the float64 comparison (a <= b); floating-point comparisons are
// encoded with the unsigned condition codes.
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
1774 
1775 
// Extracts the low 32 bits of a float64 into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1781 
1782 
// Extracts the high 32 bits of a float64 into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1788 
1789 
VisitFloat64InsertLowWord32(Node * node)1790 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1791   MipsOperandGenerator g(this);
1792   Node* left = node->InputAt(0);
1793   Node* right = node->InputAt(1);
1794   Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
1795        g.UseRegister(left), g.UseRegister(right));
1796 }
1797 
1798 
VisitFloat64InsertHighWord32(Node * node)1799 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1800   MipsOperandGenerator g(this);
1801   Node* left = node->InputAt(0);
1802   Node* right = node->InputAt(1);
1803   Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
1804        g.UseRegister(left), g.UseRegister(right));
1805 }
1806 
// Emits the NaN-silencing operation; the result reuses the input register and
// the instruction requires one scratch register.
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  MipsOperandGenerator g(this);
  Node* left = node->InputAt(0);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left),
       arraysize(temps), temps);
}
1814 
// Lowers an atomic load: picks the width-specific opcode from the load
// representation, then emits it with MRI (register base + immediate offset)
// addressing, computing base+index into a temp register first when the index
// does not fit the opcode's immediate.
void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;  // Placeholder; all reachable cases overwrite.
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Index does not fit in an immediate: fold base+index into a register and
    // load with a zero displacement.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
1848 
// Lowers an atomic store: picks the width-specific opcode, then emits it with
// MRI addressing; the stored value may use the zero register when it is the
// constant zero (UseRegisterOrImmediateZero).
void InstructionSelector::VisitAtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;  // Placeholder; all reachable cases overwrite.
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    // Index does not fit in an immediate: fold base+index into a register and
    // store with a zero displacement.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}
1884 
// static
// Reports which optional machine operators this MIPS32 backend can lower
// directly (so the compiler need not emit runtime-call fallbacks).
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  // Float64 rounding is only advertised on r2/r6 variants in FP64 mode.
  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
      IsFp64Mode()) {
    flags |= MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTiesEven;
  }

  // Operators supported unconditionally on every handled MIPS32 variant.
  return flags | MachineOperatorBuilder::kWord32Ctz |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kWord32ReverseBytes |
         MachineOperatorBuilder::kWord64ReverseBytes;
}
1909 
1910 // static
1911 MachineOperatorBuilder::AlignmentRequirements
AlignmentRequirements()1912 InstructionSelector::AlignmentRequirements() {
1913   if (IsMipsArchVariant(kMips32r6)) {
1914     return MachineOperatorBuilder::AlignmentRequirements::
1915         FullUnalignedAccessSupport();
1916   } else {
1917     DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1918            IsMipsArchVariant(kMips32r2));
1919     return MachineOperatorBuilder::AlignmentRequirements::
1920         NoUnalignedAccessSupport();
1921   }
1922 }
1923 
1924 }  // namespace compiler
1925 }  // namespace internal
1926 }  // namespace v8
1927