// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)


// Adds Mips-specific methods for generating InstructionOperands.
class MipsOperandGenerator final : public OperandGenerator {
 public:
  explicit MipsOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    Int32Matcher m(node);
    if (!m.HasValue()) return false;
    int32_t value = m.Value();
    switch (ArchOpcodeField::decode(opcode)) {
      case kMipsShl:
      case kMipsSar:
      case kMipsShr:
        return is_uint5(value);
      case kMipsXor:
        return is_uint16(value);
      case kMipsLdc1:
      case kMipsSdc1:
      case kCheckedLoadFloat64:
      case kCheckedStoreFloat64:
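        // A double access may be split into two word-sized accesses (e.g. in
        // FP32 mode), so the offset of the second word, value + kIntSize,
        // must also fit into the signed 16-bit immediate field.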
        return std::numeric_limits<int16_t>::min() <= (value + kIntSize) &&
               std::numeric_limits<int16_t>::max() >= (value + kIntSize);
      default:
        return is_int16(value);
    }
  }

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};


static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  MipsOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  MipsOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  MipsOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), opcode));
}


static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
                 inputs);
}


static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsLwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsLdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
      break;
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsLw;
      break;
    case MachineRepresentation::kWord64:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}


void InstructionSelector::VisitStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK_EQ(MachineRepresentation::kTagged, rep);
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                ? g.UseRegister(value)
                                : g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMipsSwc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMipsSdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMipsSb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMipsSh;
        break;
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kMipsSw;
        break;
      case MachineRepresentation::kWord64:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
    } else {
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegister(value));
    }
  }
}


void InstructionSelector::VisitWord32And(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
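      // For example (illustrative): And(Shr(x, 8), 0xFF) extracts bits 8..15
      // of x and becomes ext dst, x, 8, 8.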
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1f;
        // Ext cannot extract bits past the register size. However, since the
        // shift has already introduced zeros in the high bits, we can still
        // use Ext with a smaller width and the remaining bits will be zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMipsExt, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation32(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
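      // For example (illustrative): And(x, 0xFFFFFF00) clears the low 8 bits
      // of x by inserting 8 zero bits at position 0.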
      Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMipsAnd);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMipsOr);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
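  // Match Xor(Or(a, b), -1) and select Nor(a, b), which computes ~(a | b) in
  // a single instruction.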
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      MipsOperandGenerator g(this);
      Emit(kMipsNor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
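    // Negation is nor with zero: ~(x | 0) == ~x, so no -1 constant has to be
    // materialized.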
    MipsOperandGenerator g(this);
    Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMipsXor);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    MipsOperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
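    // For example (illustrative): Shl(And(x, 0xFFFF), 16) needs only the
    // shift, because the bits cleared by the mask are shifted out anyway.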
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMipsShl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMipsShl, node);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
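      // For example (illustrative): Shr(And(x, 0xFF00), 8) becomes
      // ext dst, x, 8, 8.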
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        MipsOperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMipsExt, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMipsShr, node);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitRRO(this, kMipsSar, node);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMipsRor, node);
}


void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMipsClz, node);
}


void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitInt32Add(Node* node) {
  MipsOperandGenerator g(this);

  // TODO(plind): Consider multiply & add optimization from arm port.
  VisitBinop(this, node, kMipsAdd);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMipsSub);
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
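    // Strength-reduce multiplications by 2^k, 2^k + 1 and 2^k - 1 into a
    // shift plus an optional add or sub, e.g. x * 9 => (x << 3) + x and
    // x * 7 => (x << 3) - x.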
    if (base::bits::IsPowerOfTwo32(value)) {
      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
      return;
    }
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitRRR(this, kMipsMul, node);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMipsMulHigh, node);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint32Div(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDS, node);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDW, node);
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDUw, node);
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
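  // For example (illustrative): ChangeFloat64ToInt32(Float64RoundDown(x))
  // selects the single floor-and-convert instruction kMipsFloorWD.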
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMipsFloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMipsCeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMipsRoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMipsTruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMipsFloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMipsCeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMipsRoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMipsTruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMipsTruncWD, node);
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kMipsCvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kMipsCvtSD, node);
}


void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
  switch (TruncationModeOf(node->op())) {
    case TruncationMode::kJavaScript:
      return VisitRR(this, kArchTruncateDoubleToI, node);
    case TruncationMode::kRoundToZero:
      return VisitRR(this, kMipsTruncWD, node);
  }
  UNREACHABLE();
}


void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMipsFloat64ExtractLowWord32, node);
}


void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kMipsAddS, node);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRR(this, kMipsAddD, node);
}


void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kMipsSubS, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  MipsOperandGenerator g(this);
  Float64BinopMatcher m(node);
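  // Match -0.0 - Float64RoundDown(-0.0 - x) and select Float64RoundUp(x):
  // ceil(x) == -floor(-x), and subtracting from -0.0 keeps the sign of zero
  // correct.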
  if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
      CanCover(m.node(), m.right().node())) {
    if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
        CanCover(m.right().node(), m.right().InputAt(0))) {
      Float64BinopMatcher mright0(m.right().InputAt(0));
      if (mright0.left().IsMinusZero()) {
        Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
             g.UseRegister(mright0.right().node()));
        return;
      }
    }
  }
  VisitRRR(this, kMipsSubD, node);
}


void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMipsMulS, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMipsMulD, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMipsDivS, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMipsDivD, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}


void InstructionSelector::VisitFloat32Max(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r6)) {
    Emit(kMipsFloat32Max, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));

  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}


void InstructionSelector::VisitFloat64Max(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r6)) {
    Emit(kMipsFloat64Max, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));

  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}


void InstructionSelector::VisitFloat32Min(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r6)) {
    Emit(kMipsFloat32Min, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));

  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}


void InstructionSelector::VisitFloat64Min(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r6)) {
    Emit(kMipsFloat64Min, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));

  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMipsAbsS, node);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMipsAbsD, node);
}


void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtS, node);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtD, node);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMipsFloat32RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMipsFloat64RoundDown, node);
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMipsFloat32RoundUp, node);
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMipsFloat64RoundUp, node);
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat32RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat32RoundTiesEven, node);
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat64RoundTiesEven, node);
}


void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  MipsOperandGenerator g(this);

  // Prepare for C function call.
  if (descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction |
             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
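    // Stack arguments are placed after the argument slots that the MIPS
    // calling convention reserves for register arguments (kCArgSlotCount).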
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
           g.TempImmediate(slot << kPointerSizeLog2));
      ++slot;
    }
  } else {
    // Possibly align stack here for functions.
    int push_count = static_cast<int>(descriptor->StackParameterCount());
    if (push_count > 0) {
      Emit(kMipsStackClaim, g.NoOutput(),
           g.TempImmediate(push_count << kPointerSizeLog2));
    }
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node()) {
        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
             g.TempImmediate(n << kPointerSizeLog2));
      }
    }
  }
}


bool InstructionSelector::IsTailCallAddressImmediate() { return false; }


void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:     // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);

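  // The length may only be an immediate when the offset is not, so the
  // emitted bounds check always has at least one register operand.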
  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
                                          ? g.CanBeImmediate(length, opcode)
                                                ? g.UseImmediate(length)
                                                : g.UseRegister(length)
                                          : g.UseRegister(length);

  Emit(opcode | AddressingModeField::encode(kMode_MRI),
       g.DefineAsRegister(node), offset_operand, length_operand,
       g.UseRegister(buffer));
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);

  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
                                          ? g.CanBeImmediate(length, opcode)
                                                ? g.UseImmediate(length)
                                                : g.UseRegister(length)
                                          : g.UseRegister(length);

  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
       offset_operand, length_operand, g.UseRegister(value),
       g.UseRegister(buffer));
}


namespace {

// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Float32BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
}


// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Float64BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
}


// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  MipsOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    switch (cont->condition()) {
      case kEqual:
      case kNotEqual:
        if (cont->IsSet()) {
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
        } else {
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
        }
        break;
      case kSignedLessThan:
      case kSignedGreaterThanOrEqual:
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
        VisitCompare(selector, opcode, g.UseRegister(left),
                     g.UseImmediate(right), cont);
        break;
      default:
        VisitCompare(selector, opcode, g.UseRegister(left),
                     g.UseRegister(right), cont);
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    if (!commutative) cont->Commute();
    switch (cont->condition()) {
      case kEqual:
      case kNotEqual:
        if (cont->IsSet()) {
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
        } else {
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
        }
        break;
      case kSignedLessThan:
      case kSignedGreaterThanOrEqual:
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
        VisitCompare(selector, opcode, g.UseRegister(right),
                     g.UseImmediate(left), cont);
        break;
      default:
        VisitCompare(selector, opcode, g.UseRegister(right),
                     g.UseRegister(left), cont);
    }
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMipsCmp, cont, false);
}

}  // namespace


// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  while (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Combine with comparisons against 0 by simply inverting the
        // continuation.
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          user = value;
          value = m.left().node();
          cont->Negate();
          continue;
        }
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsAddOvf, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsSubOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kMipsTst, cont, true);
      default:
        break;
    }
    break;
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  MipsOperandGenerator g(selector);
  InstructionCode const opcode = cont->Encode(kMipsCmp);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else {
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                   g.TempImmediate(0));
  }
}


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}


void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  MipsOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  size_t table_space_cost = 9 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 2 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (sw.min_value) {
      index_operand = g.TempRegister();
      Emit(kMipsSub, index_operand, value_operand,
           g.TempImmediate(sw.min_value));
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
  }
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kMipsAddOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMipsAddOvf, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kMipsSubOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMipsSubOvf, &cont);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  MipsOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  MipsOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}


// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
      IsFp64Mode()) {
    flags |= MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTiesEven;
  }
  return flags | MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kFloat64Min |
         MachineOperatorBuilder::kFloat64Max |
         MachineOperatorBuilder::kFloat32Min |
         MachineOperatorBuilder::kFloat32Max |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTiesEven;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8