1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/base/adapters.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
9 #include "src/frame-constants.h"
10
11 namespace v8 {
12 namespace internal {
13 namespace compiler {
14
// Capabilities that a selector entry can advertise for an operand: which
// immediate encodings are acceptable, and which S390 instruction formats
// (register/immediate/memory; two- or three-operand) may be used. Stored as
// bit flags so several modes can be combined (see OperandModes below).
enum class OperandMode : uint32_t {
  kNone = 0u,
  // Immediate mode
  kShift32Imm = 1u << 0,       // shift amount in [0, 32)
  kShift64Imm = 1u << 1,       // shift amount in [0, 64)
  kInt32Imm = 1u << 2,         // signed 32-bit immediate
  kInt32Imm_Negate = 1u << 3,  // immediate whose negation fits in int32
  kUint32Imm = 1u << 4,        // unsigned 32-bit immediate
  kInt20Imm = 1u << 5,         // signed 20-bit (long) displacement
  kUint12Imm = 1u << 6,        // unsigned 12-bit (short) displacement
  // Instr format
  kAllowRRR = 1u << 7,   // three-register form
  kAllowRM = 1u << 8,    // register-memory form (result ties to first input)
  kAllowRI = 1u << 9,    // register-immediate form
  kAllowRRI = 1u << 10,  // register-register-immediate form
  kAllowRRM = 1u << 11,  // register-register-memory form
  // Useful combination
  kAllowImmediate = kAllowRI | kAllowRRI,
  kAllowMemoryOperand = kAllowRM | kAllowRRM,
  kAllowDistinctOps = kAllowRRR | kAllowRRI | kAllowRRM,
  kBitWiseCommonMode = kAllowRI,
  kArithmeticCommonMode = kAllowRM | kAllowRI
};
38
// A combinable set of OperandMode bits; the generated flag operators allow
// OR-ing and testing individual modes.
typedef base::Flags<OperandMode, uint32_t> OperandModes;
DEFINE_OPERATORS_FOR_FLAGS(OperandModes);
// All bits of OperandModes that describe immediate acceptance, as opposed to
// instruction-format bits.
// NOTE(review): kUint12Imm is not part of this mask — confirm that is
// intended.
OperandModes immediateModeMask =
    OperandMode::kShift32Imm | OperandMode::kShift64Imm |
    OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
    OperandMode::kUint32Imm | OperandMode::kInt20Imm;
45
// Operand-mode sets for the individual operations. The DISTINCT_OPS CPU
// facility adds three-operand (RRR/RRI) instruction forms; without it the
// result must reuse the first input register, so those bits are only set
// conditionally at runtime.
#define AndCommonMode                                                \
  ((OperandMode::kAllowRM |                                          \
    (CpuFeatures::IsSupported(DISTINCT_OPS) ? OperandMode::kAllowRRR \
                                            : OperandMode::kNone)))
#define And64OperandMode AndCommonMode
#define Or64OperandMode And64OperandMode
#define Xor64OperandMode And64OperandMode

// 32-bit logical ops additionally accept register-immediate forms with a
// 32-bit unsigned immediate.
#define And32OperandMode \
  (AndCommonMode | OperandMode::kAllowRI | OperandMode::kUint32Imm)
#define Or32OperandMode And32OperandMode
#define Xor32OperandMode And32OperandMode

// NOTE(review): the 32-bit shift mode uses kShift64Imm, presumably because
// the shift-amount field is 6 bits either way — confirm.
#define Shift32OperandMode                                   \
  ((OperandMode::kAllowRI | OperandMode::kShift64Imm |       \
    (CpuFeatures::IsSupported(DISTINCT_OPS)                  \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
         : OperandMode::kNone)))

#define Shift64OperandMode                             \
  ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
    OperandMode::kAllowRRR | OperandMode::kAllowRRI))

// Add/Sub accept memory operands or a (possibly negated) signed 32-bit
// immediate; Mul has no distinct-ops variants here.
#define AddOperandMode                                            \
  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
    (CpuFeatures::IsSupported(DISTINCT_OPS)                       \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)      \
         : OperandMode::kArithmeticCommonMode)))
#define SubOperandMode                                                   \
  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
    (CpuFeatures::IsSupported(DISTINCT_OPS)                              \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)             \
         : OperandMode::kArithmeticCommonMode)))
#define MulOperandMode \
  (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)
81
82 // Adds S390-specific methods for generating operands.
83 class S390OperandGenerator final : public OperandGenerator {
84 public:
S390OperandGenerator(InstructionSelector * selector)85 explicit S390OperandGenerator(InstructionSelector* selector)
86 : OperandGenerator(selector) {}
87
UseOperand(Node * node,OperandModes mode)88 InstructionOperand UseOperand(Node* node, OperandModes mode) {
89 if (CanBeImmediate(node, mode)) {
90 return UseImmediate(node);
91 }
92 return UseRegister(node);
93 }
94
UseAnyExceptImmediate(Node * node)95 InstructionOperand UseAnyExceptImmediate(Node* node) {
96 if (NodeProperties::IsConstant(node))
97 return UseRegister(node);
98 else
99 return Use(node);
100 }
101
GetImmediate(Node * node)102 int64_t GetImmediate(Node* node) {
103 if (node->opcode() == IrOpcode::kInt32Constant)
104 return OpParameter<int32_t>(node->op());
105 else if (node->opcode() == IrOpcode::kInt64Constant)
106 return OpParameter<int64_t>(node->op());
107 else
108 UNIMPLEMENTED();
109 return 0L;
110 }
111
CanBeImmediate(Node * node,OperandModes mode)112 bool CanBeImmediate(Node* node, OperandModes mode) {
113 int64_t value;
114 if (node->opcode() == IrOpcode::kInt32Constant)
115 value = OpParameter<int32_t>(node->op());
116 else if (node->opcode() == IrOpcode::kInt64Constant)
117 value = OpParameter<int64_t>(node->op());
118 else
119 return false;
120 return CanBeImmediate(value, mode);
121 }
122
CanBeImmediate(int64_t value,OperandModes mode)123 bool CanBeImmediate(int64_t value, OperandModes mode) {
124 if (mode & OperandMode::kShift32Imm)
125 return 0 <= value && value < 32;
126 else if (mode & OperandMode::kShift64Imm)
127 return 0 <= value && value < 64;
128 else if (mode & OperandMode::kInt32Imm)
129 return is_int32(value);
130 else if (mode & OperandMode::kInt32Imm_Negate)
131 return is_int32(-value);
132 else if (mode & OperandMode::kUint32Imm)
133 return is_uint32(value);
134 else if (mode & OperandMode::kInt20Imm)
135 return is_int20(value);
136 else if (mode & OperandMode::kUint12Imm)
137 return is_uint12(value);
138 else
139 return false;
140 }
141
CanBeMemoryOperand(InstructionCode opcode,Node * user,Node * input,int effect_level)142 bool CanBeMemoryOperand(InstructionCode opcode, Node* user, Node* input,
143 int effect_level) {
144 if (input->opcode() != IrOpcode::kLoad ||
145 !selector()->CanCover(user, input)) {
146 return false;
147 }
148
149 if (effect_level != selector()->GetEffectLevel(input)) {
150 return false;
151 }
152
153 MachineRepresentation rep =
154 LoadRepresentationOf(input->op()).representation();
155 switch (opcode) {
156 case kS390_Cmp64:
157 case kS390_LoadAndTestWord64:
158 return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
159 case kS390_LoadAndTestWord32:
160 case kS390_Cmp32:
161 return rep == MachineRepresentation::kWord32;
162 default:
163 break;
164 }
165 return false;
166 }
167
GenerateMemoryOperandInputs(Node * index,Node * base,Node * displacement,DisplacementMode displacement_mode,InstructionOperand inputs[],size_t * input_count)168 AddressingMode GenerateMemoryOperandInputs(Node* index, Node* base,
169 Node* displacement,
170 DisplacementMode displacement_mode,
171 InstructionOperand inputs[],
172 size_t* input_count) {
173 AddressingMode mode = kMode_MRI;
174 if (base != nullptr) {
175 inputs[(*input_count)++] = UseRegister(base);
176 if (index != nullptr) {
177 inputs[(*input_count)++] = UseRegister(index);
178 if (displacement != nullptr) {
179 inputs[(*input_count)++] = displacement_mode
180 ? UseNegatedImmediate(displacement)
181 : UseImmediate(displacement);
182 mode = kMode_MRRI;
183 } else {
184 mode = kMode_MRR;
185 }
186 } else {
187 if (displacement == nullptr) {
188 mode = kMode_MR;
189 } else {
190 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
191 ? UseNegatedImmediate(displacement)
192 : UseImmediate(displacement);
193 mode = kMode_MRI;
194 }
195 }
196 } else {
197 DCHECK_NOT_NULL(index);
198 inputs[(*input_count)++] = UseRegister(index);
199 if (displacement != nullptr) {
200 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
201 ? UseNegatedImmediate(displacement)
202 : UseImmediate(displacement);
203 mode = kMode_MRI;
204 } else {
205 mode = kMode_MR;
206 }
207 }
208 return mode;
209 }
210
GetEffectiveAddressMemoryOperand(Node * operand,InstructionOperand inputs[],size_t * input_count,OperandModes immediate_mode=OperandMode::kInt20Imm)211 AddressingMode GetEffectiveAddressMemoryOperand(
212 Node* operand, InstructionOperand inputs[], size_t* input_count,
213 OperandModes immediate_mode = OperandMode::kInt20Imm) {
214 #if V8_TARGET_ARCH_S390X
215 BaseWithIndexAndDisplacement64Matcher m(operand,
216 AddressOption::kAllowInputSwap);
217 #else
218 BaseWithIndexAndDisplacement32Matcher m(operand,
219 AddressOption::kAllowInputSwap);
220 #endif
221 DCHECK(m.matches());
222 if ((m.displacement() == nullptr ||
223 CanBeImmediate(m.displacement(), immediate_mode))) {
224 DCHECK_EQ(0, m.scale());
225 return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
226 m.displacement_mode(), inputs,
227 input_count);
228 } else {
229 inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
230 inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
231 return kMode_MRR;
232 }
233 }
234
CanBeBetterLeftOperand(Node * node) const235 bool CanBeBetterLeftOperand(Node* node) const {
236 return !selector()->IsLive(node);
237 }
238
GetRepresentation(Node * node)239 MachineRepresentation GetRepresentation(Node* node) {
240 return sequence()->GetRepresentation(selector()->GetVirtualRegister(node));
241 }
242
Is64BitOperand(Node * node)243 bool Is64BitOperand(Node* node) {
244 return MachineRepresentation::kWord64 == GetRepresentation(node);
245 }
246
247 // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
248 // register.
UseRegisterOrStackPointer(Node * node)249 InstructionOperand UseRegisterOrStackPointer(Node* node) {
250 if (node->opcode() == IrOpcode::kLoadStackPointer) {
251 return LocationOperand(LocationOperand::EXPLICIT,
252 LocationOperand::REGISTER,
253 MachineRepresentation::kWord32, sp.code());
254 }
255 return UseRegister(node);
256 }
257 };
258
259 namespace {
260
S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode)261 bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
262 switch (opcode) {
263 case kS390_AddFloat:
264 case kS390_AddDouble:
265 case kS390_CmpFloat:
266 case kS390_CmpDouble:
267 case kS390_Float32ToDouble:
268 return true;
269 default:
270 return false;
271 }
272 }
273
S390OpcodeOnlySupport12BitDisp(InstructionCode op)274 bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
275 ArchOpcode opcode = ArchOpcodeField::decode(op);
276 return S390OpcodeOnlySupport12BitDisp(opcode);
277 }
278
// Chooses the displacement immediate mode for a memory operand: opcodes
// without a long-displacement form are limited to unsigned 12-bit
// displacements; all others accept signed 20-bit displacements.
#define OpcodeImmMode(op)                                       \
  (S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
                                      : OperandMode::kInt20Imm)
282
// Maps a load node's representation to the matching S390 load opcode. On
// 64-bit targets tagged values use the 64-bit load; on 31-bit targets they
// share the 32-bit load (see the two #if blocks below).
ArchOpcode SelectLoadOpcode(Node* node) {
  NodeMatcher m(node);
  DCHECK(m.IsLoad() || m.IsPoisonedLoad());
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kS390_LoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kS390_LoadDouble;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Sub-word loads pick the sign- or zero-extending variant.
      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
      break;
#if !V8_TARGET_ARCH_S390X
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
#endif
    case MachineRepresentation::kWord32:
      opcode = kS390_LoadWordU32;
      break;
#if V8_TARGET_ARCH_S390X
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kS390_LoadWord64;
      break;
#else
    case MachineRepresentation::kWord64:  // Fall through.
#endif
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
    default:
      UNREACHABLE();
  }
  return opcode;
}
327
// Opcodes whose result occupies only the low 32 bits of the output register;
// consumed by ProduceWord32Result below to decide whether a subsequent
// zero-extension can be elided.
#define RESULT_IS_WORD32_LIST(V)   \
  /* Float unary op*/              \
  V(BitcastFloat32ToInt32)         \
  /* V(TruncateFloat64ToWord32) */ \
  V(RoundFloat64ToInt32)           \
  V(TruncateFloat32ToInt32)        \
  V(TruncateFloat32ToUint32)       \
  V(TruncateFloat64ToUint32)       \
  V(ChangeFloat64ToInt32)          \
  V(ChangeFloat64ToUint32)         \
  /* Word32 unary op */            \
  V(Word32Clz)                     \
  V(Word32Popcnt)                  \
  V(Float64ExtractLowWord32)       \
  V(Float64ExtractHighWord32)      \
  V(SignExtendWord8ToInt32)        \
  V(SignExtendWord16ToInt32)       \
  /* Word32 bin op */              \
  V(Int32Add)                      \
  V(Int32Sub)                      \
  V(Int32Mul)                      \
  V(Int32AddWithOverflow)          \
  V(Int32SubWithOverflow)          \
  V(Int32MulWithOverflow)          \
  V(Int32MulHigh)                  \
  V(Uint32MulHigh)                 \
  V(Int32Div)                      \
  V(Uint32Div)                     \
  V(Int32Mod)                      \
  V(Uint32Mod)                     \
  V(Word32Ror)                     \
  V(Word32And)                     \
  V(Word32Or)                      \
  V(Word32Xor)                     \
  V(Word32Shl)                     \
  V(Word32Shr)                     \
  V(Word32Sar)
365
ProduceWord32Result(Node * node)366 bool ProduceWord32Result(Node* node) {
367 #if !V8_TARGET_ARCH_S390X
368 return true;
369 #else
370 switch (node->opcode()) {
371 #define VISITOR(name) case IrOpcode::k##name:
372 RESULT_IS_WORD32_LIST(VISITOR)
373 #undef VISITOR
374 return true;
375 // TODO(john.yan): consider the following case to be valid
376 // case IrOpcode::kWord32Equal:
377 // case IrOpcode::kInt32LessThan:
378 // case IrOpcode::kInt32LessThanOrEqual:
379 // case IrOpcode::kUint32LessThan:
380 // case IrOpcode::kUint32LessThanOrEqual:
381 // case IrOpcode::kUint32MulHigh:
382 // // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
383 // the
384 // // zero-extension is a no-op.
385 // return true;
386 // case IrOpcode::kProjection: {
387 // Node* const value = node->InputAt(0);
388 // switch (value->opcode()) {
389 // case IrOpcode::kInt32AddWithOverflow:
390 // case IrOpcode::kInt32SubWithOverflow:
391 // case IrOpcode::kInt32MulWithOverflow:
392 // return true;
393 // default:
394 // return false;
395 // }
396 // }
397 case IrOpcode::kLoad: {
398 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
399 switch (load_rep.representation()) {
400 case MachineRepresentation::kWord32:
401 return true;
402 case MachineRepresentation::kWord8:
403 if (load_rep.IsSigned())
404 return false;
405 else
406 return true;
407 default:
408 return false;
409 }
410 }
411 default:
412 return false;
413 }
414 #endif
415 }
416
// Returns whether an explicit zero-extension indication must be appended for
// |node|'s result. Only meaningful on 64-bit targets, where 32-bit results
// may need their upper 32 bits cleared; 31-bit targets never need it.
static inline bool DoZeroExtForResult(Node* node) {
#if V8_TARGET_ARCH_S390X
  return ProduceWord32Result(node);
#else
  return false;
#endif
}
424
// TODO(john.yan): Create VisitShift to match dst = src shift (R+I)
426 #if 0
427 void VisitShift() { }
428 #endif
429
#if V8_TARGET_ARCH_S390X
// Emits a truncating double->integer conversion. When the node has a success
// projection, it is materialized as a second register output.
void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  S390OperandGenerator g(selector);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* const success = NodeProperties::FindProjection(node, 1);
  if (success != nullptr) {
    outputs[output_count++] = g.DefineAsRegister(success);
  }

  selector->Emit(opcode, output_count, outputs, 1, inputs);
}
#endif
447
// Emits the right-hand operand for |node|, preferring (in priority order) an
// immediate, a folded memory operand (a covered load), then a register, as
// permitted by |operand_mode|. |operand_mode| is narrowed in place to the
// instruction formats that remain legal for the chosen operand, and |opcode|
// gains an addressing-mode field when a load is folded in.
template <class CanCombineWithLoad>
void GenerateRightOperands(InstructionSelector* selector, Node* node,
                           Node* right, InstructionCode& opcode,
                           OperandModes& operand_mode,
                           InstructionOperand* inputs, size_t& input_count,
                           CanCombineWithLoad canCombineWithLoad) {
  S390OperandGenerator g(selector);

  if ((operand_mode & OperandMode::kAllowImmediate) &&
      g.CanBeImmediate(right, operand_mode)) {
    inputs[input_count++] = g.UseImmediate(right);
    // Can only be RI or RRI
    operand_mode &= OperandMode::kAllowImmediate;
  } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
    NodeMatcher mright(right);
    if (mright.IsLoad() && selector->CanCover(node, right) &&
        canCombineWithLoad(SelectLoadOpcode(right))) {
      // Fold the load into this instruction as a memory operand.
      AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
          right, inputs, &input_count, OpcodeImmMode(opcode));
      opcode |= AddressingModeField::encode(mode);
      operand_mode &= ~OperandMode::kAllowImmediate;
      // NOTE(review): presumably RM forms tie the result to the first
      // operand, ruling out distinct-ops variants — confirm.
      if (operand_mode & OperandMode::kAllowRM)
        operand_mode &= ~OperandMode::kAllowDistinctOps;
    } else if (operand_mode & OperandMode::kAllowRM) {
      DCHECK(!(operand_mode & OperandMode::kAllowRRM));
      inputs[input_count++] = g.UseAnyExceptImmediate(right);
      // Can not be Immediate
      operand_mode &=
          ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
    } else if (operand_mode & OperandMode::kAllowRRM) {
      DCHECK(!(operand_mode & OperandMode::kAllowRM));
      inputs[input_count++] = g.UseAnyExceptImmediate(right);
      // Can not be Immediate
      operand_mode &= ~OperandMode::kAllowImmediate;
    } else {
      UNREACHABLE();
    }
  } else {
    inputs[input_count++] = g.UseRegister(right);
    // Can only be RR or RRR
    operand_mode &= OperandMode::kAllowRRR;
  }
}
491
492 template <class CanCombineWithLoad>
GenerateBinOpOperands(InstructionSelector * selector,Node * node,Node * left,Node * right,InstructionCode & opcode,OperandModes & operand_mode,InstructionOperand * inputs,size_t & input_count,CanCombineWithLoad canCombineWithLoad)493 void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
494 Node* left, Node* right, InstructionCode& opcode,
495 OperandModes& operand_mode,
496 InstructionOperand* inputs, size_t& input_count,
497 CanCombineWithLoad canCombineWithLoad) {
498 S390OperandGenerator g(selector);
499 // left is always register
500 InstructionOperand const left_input = g.UseRegister(left);
501 inputs[input_count++] = left_input;
502
503 if (left == right) {
504 inputs[input_count++] = left_input;
505 // Can only be RR or RRR
506 operand_mode &= OperandMode::kAllowRRR;
507 } else {
508 GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs,
509 input_count, canCombineWithLoad);
510 }
511 }
512
// Forward declarations: the generic unary/binary emit helpers are defined
// after the Visit<Type><Kind>Op wrappers generated below, which call them.
template <class CanCombineWithLoad>
void VisitUnaryOp(InstructionSelector* selector, Node* node,
                  InstructionCode opcode, OperandModes operand_mode,
                  FlagsContinuation* cont,
                  CanCombineWithLoad canCombineWithLoad);

template <class CanCombineWithLoad>
void VisitBinOp(InstructionSelector* selector, Node* node,
                InstructionCode opcode, OperandModes operand_mode,
                FlagsContinuation* cont, CanCombineWithLoad canCombineWithLoad);
523
// Generates the following variations:
//   VisitWord32UnaryOp, VisitWord32BinOp,
//   VisitWord64UnaryOp, VisitWord64BinOp,
//   VisitFloat32UnaryOp, VisitFloat32BinOp,
//   VisitFloat64UnaryOp, VisitFloat64BinOp
// Each entry supplies the predicate that decides which load opcodes may be
// folded into the operation as a memory operand. Note that the Word64 unary
// entry lives in the 32-bit list; only Word64 *binops* are 64-bit-only.
#define VISIT_OP_LIST_32(V)                                          \
  V(Word32, Unary, [](ArchOpcode opcode) {                           \
    return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
  })                                                                 \
  V(Word64, Unary,                                                   \
    [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })    \
  V(Float32, Unary,                                                  \
    [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; })   \
  V(Float64, Unary,                                                  \
    [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })    \
  V(Word32, Bin, [](ArchOpcode opcode) {                             \
    return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
  })                                                                 \
  V(Float32, Bin,                                                    \
    [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; })   \
  V(Float64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })

#if V8_TARGET_ARCH_S390X
#define VISIT_OP_LIST(V) \
  VISIT_OP_LIST_32(V)    \
  V(Word64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })
#else
#define VISIT_OP_LIST VISIT_OP_LIST_32
#endif
553
// For each (type, op-kind) pair in VISIT_OP_LIST, defines two
// Visit<Type><Kind>Op wrappers: one taking an explicit FlagsContinuation and
// one supplying a default (no-flags) continuation.
#define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad)  \
  static inline void Visit##type1##type2##Op(                             \
      InstructionSelector* selector, Node* node, InstructionCode opcode,  \
      OperandModes operand_mode, FlagsContinuation* cont) {               \
    Visit##type2##Op(selector, node, opcode, operand_mode, cont,          \
                     canCombineWithLoad);                                 \
  }                                                                       \
  static inline void Visit##type1##type2##Op(                             \
      InstructionSelector* selector, Node* node, InstructionCode opcode,  \
      OperandModes operand_mode) {                                        \
    FlagsContinuation cont;                                               \
    Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont); \
  }
VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS);
#undef DECLARE_VISIT_HELPER_FUNCTIONS
#undef VISIT_OP_LIST_32
#undef VISIT_OP_LIST
571
// Emits a unary operation: selects the single input's operand form via the
// right-operand machinery, optionally appends a zero-extension indication,
// and chooses the output constraint.
template <class CanCombineWithLoad>
void VisitUnaryOp(InstructionSelector* selector, Node* node,
                  InstructionCode opcode, OperandModes operand_mode,
                  FlagsContinuation* cont,
                  CanCombineWithLoad canCombineWithLoad) {
  S390OperandGenerator g(selector);
  InstructionOperand inputs[8];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;
  Node* input = node->InputAt(0);

  GenerateRightOperands(selector, node, input, opcode, operand_mode, inputs,
                        input_count, canCombineWithLoad);

  bool input_is_word32 = ProduceWord32Result(input);

  bool doZeroExt = DoZeroExtForResult(node);
  bool canEliminateZeroExt = input_is_word32;

  if (doZeroExt) {
    // Add zero-ext indication: the code generator elides the zero-extension
    // when this flag operand is 0.
    inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
  }

  if (!cont->IsDeoptimize()) {
    if (doZeroExt && canEliminateZeroExt) {
      // we have to make sure result and left use the same register
      outputs[output_count++] = g.DefineSameAsFirst(node);
    } else {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
  } else {
    // If we can deoptimize as a result of the op, we need to make sure that
    // the deopt inputs are not overwritten by the result. One way to achieve
    // that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
619
// Emits a binary operation: canonicalizes commutative operands, selects
// operand forms, optionally appends a zero-extension indication, and chooses
// the output constraint depending on distinct-ops support and deopt state.
template <class CanCombineWithLoad>
void VisitBinOp(InstructionSelector* selector, Node* node,
                InstructionCode opcode, OperandModes operand_mode,
                FlagsContinuation* cont,
                CanCombineWithLoad canCombineWithLoad) {
  S390OperandGenerator g(selector);
  // NOTE(review): an Int32BinopMatcher is used even for 64-bit operations;
  // only left()/right() nodes are read here, so the value width does not
  // matter.
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[8];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // For commutative ops, prefer the operand that dies here on the left so
  // its register can be reused for the result — unless the right operand
  // could instead be encoded as an immediate.
  if (node->op()->HasProperty(Operator::kCommutative) &&
      !g.CanBeImmediate(right, operand_mode) &&
      (g.CanBeBetterLeftOperand(right))) {
    std::swap(left, right);
  }

  GenerateBinOpOperands(selector, node, left, right, opcode, operand_mode,
                        inputs, input_count, canCombineWithLoad);

  bool left_is_word32 = ProduceWord32Result(left);

  bool doZeroExt = DoZeroExtForResult(node);
  bool canEliminateZeroExt = left_is_word32;

  if (doZeroExt) {
    // Add zero-ext indication: the code generator elides the zero-extension
    // when this flag operand is 0.
    inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
  }

  if ((operand_mode & OperandMode::kAllowDistinctOps) &&
      // If we can deoptimize as a result of the binop, we need to make sure
      // that the deopt inputs are not overwritten by the binop result. One way
      // to achieve that is to declare the output register as same-as-first.
      !cont->IsDeoptimize()) {
    if (doZeroExt && canEliminateZeroExt) {
      // we have to make sure result and left use the same register
      outputs[output_count++] = g.DefineSameAsFirst(node);
    } else {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
  } else {
    outputs[output_count++] = g.DefineSameAsFirst(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
676
677 } // namespace
678
VisitStackSlot(Node * node)679 void InstructionSelector::VisitStackSlot(Node* node) {
680 StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
681 int slot = frame_->AllocateSpillSlot(rep.size());
682 OperandGenerator g(this);
683
684 Emit(kArchStackSlot, g.DefineAsRegister(node),
685 sequence()->AddImmediate(Constant(slot)), 0, nullptr);
686 }
687
VisitDebugAbort(Node * node)688 void InstructionSelector::VisitDebugAbort(Node* node) {
689 S390OperandGenerator g(this);
690 Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
691 }
692
VisitLoad(Node * node)693 void InstructionSelector::VisitLoad(Node* node) {
694 S390OperandGenerator g(this);
695 InstructionCode opcode = SelectLoadOpcode(node);
696 InstructionOperand outputs[1];
697 outputs[0] = g.DefineAsRegister(node);
698 InstructionOperand inputs[3];
699 size_t input_count = 0;
700 AddressingMode mode =
701 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
702 opcode |= AddressingModeField::encode(mode);
703 if (node->opcode() == IrOpcode::kPoisonedLoad) {
704 CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
705 opcode |= MiscField::encode(kMemoryAccessPoisoned);
706 }
707
708 Emit(opcode, 1, outputs, input_count, inputs);
709 }
710
// Poisoned loads share VisitLoad, which inspects node->opcode() to apply the
// poisoning MiscField bit.
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
712
// Protected (trap-handler) loads are not implemented for S390.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
717
// Emits a store. Stores that require a write barrier go through the
// out-of-line record-write stub with uniquely-assigned registers; plain
// stores pick a representation-specific opcode and may fold a byte-reversal
// into a store-reversed instruction.
void InstructionSelector::VisitStore(Node* node) {
  S390OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    // Unique registers: the record-write stub must not alias its inputs.
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
    // for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) {
      inputs[input_count++] = g.UseImmediate(offset);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(offset);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    NodeMatcher m(value);
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kS390_StoreFloat32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kS390_StoreDouble;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kS390_StoreWord8;
        break;
      case MachineRepresentation::kWord16:
        opcode = kS390_StoreWord16;
        break;
#if !V8_TARGET_ARCH_S390X
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
#endif
      case MachineRepresentation::kWord32:
        opcode = kS390_StoreWord32;
        // Fold a ReverseBytes of the value into a store-reversed.
        if (m.IsWord32ReverseBytes()) {
          opcode = kS390_StoreReverse32;
          value = value->InputAt(0);
        }
        break;
#if V8_TARGET_ARCH_S390X
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kS390_StoreWord64;
        if (m.IsWord64ReverseBytes()) {
          opcode = kS390_StoreReverse64;
          value = value->InputAt(0);
        }
        break;
#else
      case MachineRepresentation::kWord64:  // Fall through.
#endif
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }
    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    InstructionOperand value_operand = g.UseRegister(value);
    inputs[input_count++] = value_operand;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}
825
// Protected (trap-handler) stores are not implemented for S390.
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
830
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }

// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
836
#if 0
// Compiled out: the 32-bit contiguous-mask matcher is currently unused; the
// 64-bit variant below is the live one.
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation(value);
  int mask_msb = base::bits::CountLeadingZeros32(value);
  int mask_lsb = base::bits::CountTrailingZeros32(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}
#endif
849
#if V8_TARGET_ARCH_S390X
// Determines whether |value| consists of a single contiguous run of set
// bits. On success, *mb receives the index of the run's highest set bit and
// *me the index of its lowest set bit (counted from the LSB).
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
  int const pop = base::bits::CountPopulation(value);
  int const leading = base::bits::CountLeadingZeros64(value);
  int const trailing = base::bits::CountTrailingZeros64(value);
  // A contiguous mask partitions all 64 bits into leading zeros, the run of
  // ones, and trailing zeros; zero itself is rejected by pop == 0.
  if (pop == 0 || leading + pop + trailing != 64) return false;
  *mb = trailing + pop - 1;
  *me = trailing;
  return true;
}
#endif
862
863 #if V8_TARGET_ARCH_S390X
// Tries to combine "x & contiguous-mask" — optionally with a covered 64-bit
// shift feeding the AND — into a single rotate-then-clear instruction (RISBG
// family, available with GENERAL_INSTR_EXT). Falls back to the generic
// 64-bit AND binop otherwise.
void InstructionSelector::VisitWord64And(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    // If the left input is a covered shift, fold the shift amount into the
    // rotate and operate on the shift's input instead.
    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
        CanCover(node, left)) {
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        left = mleft.left().node();
        sh = mleft.right().Value();
        if (m.left().IsWord64Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 63 - sh) mb = 63 - sh;
          // A right shift by sh is a left rotate by (64 - sh).
          sh = (64 - sh) & 0x3F;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      bool match = false;
      ArchOpcode opcode;
      int mask;
      if (me == 0) {
        // Mask extends to bit 0: clear only bits above mb.
        match = true;
        opcode = kS390_RotLeftAndClearLeft64;
        mask = mb;
      } else if (mb == 63) {
        // Mask extends to bit 63: clear only bits below me.
        match = true;
        opcode = kS390_RotLeftAndClearRight64;
        mask = me;
      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
        // Interior mask reachable via the shifted-left form.
        match = true;
        opcode = kS390_RotLeftAndClear64;
        mask = mb;
      }
      if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
             g.TempImmediate(sh), g.TempImmediate(mask));
        return;
      }
    }
  }
  VisitWord64BinOp(this, node, kS390_And64, And64OperandMode);
}
914
// Tries to fuse (x & mask) << sh into one rotate-then-clear instruction when
// the shifted mask is still contiguous; falls back to a plain 64-bit shift.
void InstructionSelector::VisitWord64Shl(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(mbrandy): eliminate left sign extension if right >= 32
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    // Test the mask as it appears *after* the shift.
    if (mleft.right().HasValue() &&
        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          // Interior mask: requires a non-zero shift amount.
          match = true;
          opcode = kS390_RotLeftAndClear64;
          mask = mb;
        }
        if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitWord64BinOp(this, node, kS390_ShiftLeft64, Shift64OperandMode);
}
956
// Tries to fuse (x & mask) >> sh into one rotate-then-clear instruction when
// the shifted mask is still contiguous; falls back to a plain 64-bit shift.
void InstructionSelector::VisitWord64Shr(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    // Test the mask as it appears *after* the (unsigned) right shift.
    if (mleft.right().HasValue() &&
        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      // Express the logical right shift as an equivalent left rotate.
      sh = (64 - sh) & 0x3F;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        }
        // NOTE: unlike VisitWord64And/Shl, no interior-mask case and no
        // GENERAL_INSTR_EXT check here — only the two edge-mask forms.
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitWord64BinOp(this, node, kS390_ShiftRight64, Shift64OperandMode);
}
#endif
995
TryMatchSignExtInt16OrInt8FromWord32Sar(InstructionSelector * selector,Node * node)996 static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
997 InstructionSelector* selector, Node* node) {
998 S390OperandGenerator g(selector);
999 Int32BinopMatcher m(node);
1000 if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
1001 Int32BinopMatcher mleft(m.left().node());
1002 if (mleft.right().Is(16) && m.right().Is(16)) {
1003 bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
1004 selector->Emit(kS390_SignExtendWord16ToInt32,
1005 canEliminateZeroExt ? g.DefineSameAsFirst(node)
1006 : g.DefineAsRegister(node),
1007 g.UseRegister(mleft.left().node()),
1008 g.TempImmediate(!canEliminateZeroExt));
1009 return true;
1010 } else if (mleft.right().Is(24) && m.right().Is(24)) {
1011 bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
1012 selector->Emit(kS390_SignExtendWord8ToInt32,
1013 canEliminateZeroExt ? g.DefineSameAsFirst(node)
1014 : g.DefineAsRegister(node),
1015 g.UseRegister(mleft.left().node()),
1016 g.TempImmediate(!canEliminateZeroExt));
1017 return true;
1018 }
1019 }
1020 return false;
1021 }
1022
1023 #if !V8_TARGET_ARCH_S390X
VisitPairBinop(InstructionSelector * selector,InstructionCode opcode,InstructionCode opcode2,Node * node)1024 void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
1025 InstructionCode opcode2, Node* node) {
1026 S390OperandGenerator g(selector);
1027
1028 Node* projection1 = NodeProperties::FindProjection(node, 1);
1029 if (projection1) {
1030 // We use UseUniqueRegister here to avoid register sharing with the output
1031 // registers.
1032 InstructionOperand inputs[] = {
1033 g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
1034 g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
1035
1036 InstructionOperand outputs[] = {
1037 g.DefineAsRegister(node),
1038 g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
1039
1040 selector->Emit(opcode, 2, outputs, 4, inputs);
1041 } else {
1042 // The high word of the result is not used, so we emit the standard 32 bit
1043 // instruction.
1044 selector->Emit(opcode2, g.DefineSameAsFirst(node),
1045 g.UseRegister(node->InputAt(0)),
1046 g.UseRegister(node->InputAt(2)), g.TempImmediate(0));
1047 }
1048 }
1049
// 32-bit pair add: full pair instruction, or plain Add32 if the high word
// is unused (see VisitPairBinop).
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitPairBinop(this, kS390_AddPair, kS390_Add32, node);
}

// 32-bit pair subtract, same fallback strategy as pair add.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitPairBinop(this, kS390_SubPair, kS390_Sub32, node);
}
1057
VisitInt32PairMul(Node * node)1058 void InstructionSelector::VisitInt32PairMul(Node* node) {
1059 S390OperandGenerator g(this);
1060 Node* projection1 = NodeProperties::FindProjection(node, 1);
1061 if (projection1) {
1062 InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
1063 g.UseUniqueRegister(node->InputAt(1)),
1064 g.UseUniqueRegister(node->InputAt(2)),
1065 g.UseUniqueRegister(node->InputAt(3))};
1066
1067 InstructionOperand outputs[] = {
1068 g.DefineAsRegister(node),
1069 g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
1070
1071 Emit(kS390_MulPair, 2, outputs, 4, inputs);
1072 } else {
1073 // The high word of the result is not used, so we emit the standard 32 bit
1074 // instruction.
1075 Emit(kS390_Mul32, g.DefineSameAsFirst(node),
1076 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(2)),
1077 g.TempImmediate(0));
1078 }
1079 }
1080
namespace {
// Shared routine for multiple shift operations.
// Emits a pair (two-register) shift. The shift amount may be an immediate;
// otherwise it gets a unique register. If the high-word projection is unused,
// a temp register stands in for the second output.
void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
                    Node* node) {
  S390OperandGenerator g(selector);
  // We use g.UseUniqueRegister here to guarantee that there is
  // no register aliasing of input registers with output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    // High word unused: give the instruction a scratch register instead.
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
}  // namespace
1117
// Pair (64-bit-as-two-registers) shift left.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitPairShift(this, kS390_ShiftLeftPair, node);
}

// Pair logical shift right.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitPairShift(this, kS390_ShiftRightPair, node);
}

// Pair arithmetic shift right.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitPairShift(this, kS390_ShiftRightArithPair, node);
}
#endif
1130
// Operations unsupported on this architecture; the instruction selector
// must never reach them (guarded by machine-operator capability flags).
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
1144
// Absolute value with overflow check (overflows for INT32_MIN).
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  VisitWord32UnaryOp(this, node, kS390_Abs32, OperandMode::kNone);
}

// 64-bit absolute value with overflow check (overflows for INT64_MIN).
void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  VisitWord64UnaryOp(this, node, kS390_Abs64, OperandMode::kNone);
}
1152
// Byte-swaps a 64-bit register value (register-to-register form only).
void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1158
// Byte-swaps a 32-bit value. When the input is a covered word32 load, the
// swap is folded into a byte-reversing load; otherwise a register-to-register
// reverse is emitted.
void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kWord32) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      Emit(kS390_LoadReverse32 | AddressingModeField::encode(kMode_MRR),
           // TODO(john.yan): one of the base and offset can be imm.
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset));
      return;
    }
  }
  Emit(kS390_LoadReverse32RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1177
// Recognizes (0 - x) and emits a negate instruction instead of a subtract.
// Returns true on match. For 32-bit results, the trailing immediate tells the
// code generator whether a zero-extension is still required.
template <class Matcher, ArchOpcode neg_opcode>
static inline bool TryMatchNegFromSub(InstructionSelector* selector,
                                      Node* node) {
  S390OperandGenerator g(selector);
  Matcher m(node);
  static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64,
                "Provided opcode is not a Neg opcode.");
  if (m.left().Is(0)) {
    Node* value = m.right().node();
    bool doZeroExt = DoZeroExtForResult(node);
    bool canEliminateZeroExt = ProduceWord32Result(value);
    if (doZeroExt) {
      selector->Emit(neg_opcode,
                     canEliminateZeroExt ? g.DefineSameAsFirst(node)
                                         : g.DefineAsRegister(node),
                     g.UseRegister(value),
                     g.TempImmediate(!canEliminateZeroExt));
    } else {
      selector->Emit(neg_opcode, g.DefineAsRegister(node),
                     g.UseRegister(value));
    }
    return true;
  }
  return false;
}
1203
// Strength-reduces multiplication by a power-of-two immediate into a left
// shift. Returns true on match.
template <class Matcher, ArchOpcode shift_op>
bool TryMatchShiftFromMul(InstructionSelector* selector, Node* node) {
  S390OperandGenerator g(selector);
  Matcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
      base::bits::IsPowerOfTwo(g.GetImmediate(right))) {
    // log2 of the immediate = shift amount.
    int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
    bool doZeroExt = DoZeroExtForResult(node);
    bool canEliminateZeroExt = ProduceWord32Result(left);
    // A distinct destination register is only profitable when we must
    // zero-extend anyway and the CPU supports distinct-operand forms.
    InstructionOperand dst = (doZeroExt && !canEliminateZeroExt &&
                              CpuFeatures::IsSupported(DISTINCT_OPS))
                                 ? g.DefineAsRegister(node)
                                 : g.DefineSameAsFirst(node);

    if (doZeroExt) {
      selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power),
                     g.TempImmediate(!canEliminateZeroExt));
    } else {
      selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power));
    }
    return true;
  }
  return false;
}
1230
// If the overflow projection of |node| is used, emits the op with a
// set-on-overflow continuation and returns true; otherwise returns false so
// the caller can emit the plain (non-flag-setting) form.
template <ArchOpcode opcode>
static inline bool TryMatchInt32OpWithOverflow(InstructionSelector* selector,
                                               Node* node, OperandModes mode) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    VisitWord32BinOp(selector, node, opcode, mode, &cont);
    return true;
  }
  return false;
}
1241
// Overflow-checked 32-bit add (emitted only when the overflow output is used).
static inline bool TryMatchInt32AddWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt32OpWithOverflow<kS390_Add32>(selector, node,
                                                  AddOperandMode);
}

// Overflow-checked 32-bit subtract (emitted only when the overflow output is
// used).
static inline bool TryMatchInt32SubWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt32OpWithOverflow<kS390_Sub32>(selector, node,
                                                  SubOperandMode);
}
1253
// Overflow-checked 32-bit multiply. With MISC_INSTR_EXT2 the hardware sets
// the overflow flag directly; otherwise a Mul32WithOverflow pseudo-op with a
// not-equal continuation is used. When the overflow output is unused, falls
// back to the shift strength-reduction.
static inline bool TryMatchInt32MulWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
      TryMatchInt32OpWithOverflow<kS390_Mul32>(
          selector, node, OperandMode::kAllowRRR | OperandMode::kAllowRM);
    } else {
      FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
      VisitWord32BinOp(selector, node, kS390_Mul32WithOverflow,
                       OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
                       &cont);
    }
    return true;
  }
  return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(selector,
                                                                    node);
}
1271
#if V8_TARGET_ARCH_S390X
// 64-bit analog of TryMatchInt32OpWithOverflow: emits with a set-on-overflow
// continuation if the overflow projection is used.
template <ArchOpcode opcode>
static inline bool TryMatchInt64OpWithOverflow(InstructionSelector* selector,
                                               Node* node, OperandModes mode) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    VisitWord64BinOp(selector, node, opcode, mode, &cont);
    return true;
  }
  return false;
}

// Overflow-checked 64-bit add.
static inline bool TryMatchInt64AddWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt64OpWithOverflow<kS390_Add64>(selector, node,
                                                  AddOperandMode);
}

// Overflow-checked 64-bit subtract.
static inline bool TryMatchInt64SubWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt64OpWithOverflow<kS390_Sub64>(selector, node,
                                                  SubOperandMode);
}
#endif
1296
TryMatchDoubleConstructFromInsert(InstructionSelector * selector,Node * node)1297 static inline bool TryMatchDoubleConstructFromInsert(
1298 InstructionSelector* selector, Node* node) {
1299 S390OperandGenerator g(selector);
1300 Node* left = node->InputAt(0);
1301 Node* right = node->InputAt(1);
1302 Node* lo32 = nullptr;
1303 Node* hi32 = nullptr;
1304
1305 if (node->opcode() == IrOpcode::kFloat64InsertLowWord32) {
1306 lo32 = right;
1307 } else if (node->opcode() == IrOpcode::kFloat64InsertHighWord32) {
1308 hi32 = right;
1309 } else {
1310 return false; // doesn't match
1311 }
1312
1313 if (left->opcode() == IrOpcode::kFloat64InsertLowWord32) {
1314 lo32 = left->InputAt(1);
1315 } else if (left->opcode() == IrOpcode::kFloat64InsertHighWord32) {
1316 hi32 = left->InputAt(1);
1317 } else {
1318 return false; // doesn't match
1319 }
1320
1321 if (!lo32 || !hi32) return false; // doesn't match
1322
1323 selector->Emit(kS390_DoubleConstruct, g.DefineAsRegister(node),
1324 g.UseRegister(hi32), g.UseRegister(lo32));
1325 return true;
1326 }
1327
// `null` is the "no custom matcher" placeholder: a lambda that never matches,
// so DECLARE_*_OP falls through to the generic visitor. #undef'd below.
#define null ([]() { return false; })

// TODO(john.yan): place kAllowRM where available
// Unary FP ops shared by 31- and 64-bit targets:
// V(input class, node name, arch opcode, operand modes, custom matcher).
#define FLOAT_UNARY_OP_LIST_32(V)                                              \
  V(Float32, ChangeFloat32ToFloat64, kS390_Float32ToDouble,                    \
    OperandMode::kAllowRM, null)                                               \
  V(Float32, BitcastFloat32ToInt32, kS390_BitcastFloat32ToInt32,               \
    OperandMode::kAllowRM, null)                                               \
  V(Float64, TruncateFloat64ToFloat32, kS390_DoubleToFloat32,                  \
    OperandMode::kNone, null)                                                  \
  V(Float64, TruncateFloat64ToWord32, kArchTruncateDoubleToI,                  \
    OperandMode::kNone, null)                                                  \
  V(Float64, RoundFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone,     \
    null)                                                                      \
  V(Float32, TruncateFloat32ToInt32, kS390_Float32ToInt32, OperandMode::kNone, \
    null)                                                                      \
  V(Float32, TruncateFloat32ToUint32, kS390_Float32ToUint32,                   \
    OperandMode::kNone, null)                                                  \
  V(Float64, TruncateFloat64ToUint32, kS390_DoubleToUint32,                    \
    OperandMode::kNone, null)                                                  \
  V(Float64, ChangeFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone,    \
    null)                                                                      \
  V(Float64, ChangeFloat64ToUint32, kS390_DoubleToUint32, OperandMode::kNone,  \
    null)                                                                      \
  V(Float64, Float64SilenceNaN, kS390_Float64SilenceNaN, OperandMode::kNone,   \
    null)                                                                      \
  V(Float32, Float32Abs, kS390_AbsFloat, OperandMode::kNone, null)             \
  V(Float64, Float64Abs, kS390_AbsDouble, OperandMode::kNone, null)            \
  V(Float32, Float32Sqrt, kS390_SqrtFloat, OperandMode::kNone, null)           \
  V(Float64, Float64Sqrt, kS390_SqrtDouble, OperandMode::kNone, null)          \
  V(Float32, Float32RoundDown, kS390_FloorFloat, OperandMode::kNone, null)     \
  V(Float64, Float64RoundDown, kS390_FloorDouble, OperandMode::kNone, null)    \
  V(Float32, Float32RoundUp, kS390_CeilFloat, OperandMode::kNone, null)        \
  V(Float64, Float64RoundUp, kS390_CeilDouble, OperandMode::kNone, null)       \
  V(Float32, Float32RoundTruncate, kS390_TruncateFloat, OperandMode::kNone,    \
    null)                                                                      \
  V(Float64, Float64RoundTruncate, kS390_TruncateDouble, OperandMode::kNone,   \
    null)                                                                      \
  V(Float64, Float64RoundTiesAway, kS390_RoundDouble, OperandMode::kNone,      \
    null)                                                                      \
  V(Float32, Float32Neg, kS390_NegFloat, OperandMode::kNone, null)             \
  V(Float64, Float64Neg, kS390_NegDouble, OperandMode::kNone, null)            \
  /* TODO(john.yan): can use kAllowRM */                                       \
  V(Word32, Float64ExtractLowWord32, kS390_DoubleExtractLowWord32,             \
    OperandMode::kNone, null)                                                  \
  V(Word32, Float64ExtractHighWord32, kS390_DoubleExtractHighWord32,           \
    OperandMode::kNone, null)
1374
// Binary FP ops (same V() signature as the unary lists above).
#define FLOAT_BIN_OP_LIST(V)                                           \
  V(Float32, Float32Add, kS390_AddFloat, OperandMode::kAllowRM, null)  \
  V(Float64, Float64Add, kS390_AddDouble, OperandMode::kAllowRM, null) \
  V(Float32, Float32Sub, kS390_SubFloat, OperandMode::kAllowRM, null)  \
  V(Float64, Float64Sub, kS390_SubDouble, OperandMode::kAllowRM, null) \
  V(Float32, Float32Mul, kS390_MulFloat, OperandMode::kAllowRM, null)  \
  V(Float64, Float64Mul, kS390_MulDouble, OperandMode::kAllowRM, null) \
  V(Float32, Float32Div, kS390_DivFloat, OperandMode::kAllowRM, null)  \
  V(Float64, Float64Div, kS390_DivDouble, OperandMode::kAllowRM, null) \
  V(Float32, Float32Max, kS390_MaxFloat, OperandMode::kNone, null)     \
  V(Float64, Float64Max, kS390_MaxDouble, OperandMode::kNone, null)    \
  V(Float32, Float32Min, kS390_MinFloat, OperandMode::kNone, null)     \
  V(Float64, Float64Min, kS390_MinDouble, OperandMode::kNone, null)
1388
// Unary word32 ops shared by 31- and 64-bit targets.
#define WORD32_UNARY_OP_LIST_32(V)                                           \
  V(Word32, Word32Clz, kS390_Cntlz32, OperandMode::kNone, null)              \
  V(Word32, Word32Popcnt, kS390_Popcnt32, OperandMode::kNone, null)          \
  V(Word32, RoundInt32ToFloat32, kS390_Int32ToFloat32, OperandMode::kNone,   \
    null)                                                                    \
  V(Word32, RoundUint32ToFloat32, kS390_Uint32ToFloat32, OperandMode::kNone, \
    null)                                                                    \
  V(Word32, ChangeInt32ToFloat64, kS390_Int32ToDouble, OperandMode::kNone,   \
    null)                                                                    \
  V(Word32, ChangeUint32ToFloat64, kS390_Uint32ToDouble, OperandMode::kNone, \
    null)                                                                    \
  V(Word32, SignExtendWord8ToInt32, kS390_SignExtendWord8ToInt32,            \
    OperandMode::kNone, null)                                                \
  V(Word32, SignExtendWord16ToInt32, kS390_SignExtendWord16ToInt32,          \
    OperandMode::kNone, null)                                                \
  V(Word32, BitcastInt32ToFloat32, kS390_BitcastInt32ToFloat32,              \
    OperandMode::kNone, null)
1406
#ifdef V8_TARGET_ARCH_S390X
// 64-bit targets extend the shared lists with 64-bit-only conversions.
#define FLOAT_UNARY_OP_LIST(V)                                               \
  FLOAT_UNARY_OP_LIST_32(V)                                                  \
  V(Float64, ChangeFloat64ToUint64, kS390_DoubleToUint64, OperandMode::kNone, \
    null)                                                                    \
  V(Float64, BitcastFloat64ToInt64, kS390_BitcastDoubleToInt64,              \
    OperandMode::kNone, null)
// ChangeUint32ToUint64 uses a custom matcher: when the input already yields
// a zero-extended word32 result, the conversion is a no-op (EmitIdentity).
#define WORD32_UNARY_OP_LIST(V)                                              \
  WORD32_UNARY_OP_LIST_32(V)                                                 \
  V(Word32, ChangeInt32ToInt64, kS390_SignExtendWord32ToInt64,               \
    OperandMode::kNone, null)                                                \
  V(Word32, SignExtendWord8ToInt64, kS390_SignExtendWord8ToInt64,            \
    OperandMode::kNone, null)                                                \
  V(Word32, SignExtendWord16ToInt64, kS390_SignExtendWord16ToInt64,          \
    OperandMode::kNone, null)                                                \
  V(Word32, SignExtendWord32ToInt64, kS390_SignExtendWord32ToInt64,          \
    OperandMode::kNone, null)                                                \
  V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone,  \
    [&]() -> bool {                                                          \
      if (ProduceWord32Result(node->InputAt(0))) {                           \
        EmitIdentity(node);                                                  \
        return true;                                                         \
      }                                                                      \
      return false;                                                          \
    })

#else
// 31-bit targets use the shared lists unchanged.
#define FLOAT_UNARY_OP_LIST(V) FLOAT_UNARY_OP_LIST_32(V)
#define WORD32_UNARY_OP_LIST(V) WORD32_UNARY_OP_LIST_32(V)
#endif
1437
// Binary word32 ops. The fifth column is a custom matcher tried before the
// generic visitor (e.g. sub-from-zero -> neg, mul-by-pow2 -> shift).
#define WORD32_BIN_OP_LIST(V)                                                  \
  V(Word32, Int32Add, kS390_Add32, AddOperandMode, null)                       \
  V(Word32, Int32Sub, kS390_Sub32, SubOperandMode, ([&]() {                    \
      return TryMatchNegFromSub<Int32BinopMatcher, kS390_Neg32>(this, node);   \
    }))                                                                        \
  V(Word32, Int32Mul, kS390_Mul32, MulOperandMode, ([&]() {                    \
      return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(this,  \
                                                                        node); \
    }))                                                                        \
  V(Word32, Int32AddWithOverflow, kS390_Add32, AddOperandMode,                 \
    ([&]() { return TryMatchInt32AddWithOverflow(this, node); }))              \
  V(Word32, Int32SubWithOverflow, kS390_Sub32, SubOperandMode,                 \
    ([&]() { return TryMatchInt32SubWithOverflow(this, node); }))              \
  V(Word32, Int32MulWithOverflow, kS390_Mul32, MulOperandMode,                 \
    ([&]() { return TryMatchInt32MulWithOverflow(this, node); }))              \
  V(Word32, Int32MulHigh, kS390_MulHigh32,                                     \
    OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps, null)             \
  V(Word32, Uint32MulHigh, kS390_MulHighU32,                                   \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Int32Div, kS390_Div32,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Uint32Div, kS390_DivU32,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Int32Mod, kS390_Mod32,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Uint32Mod, kS390_ModU32,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Word32Ror, kS390_RotRight32,                                       \
    OperandMode::kAllowRI | OperandMode::kAllowRRR | OperandMode::kAllowRRI |  \
        OperandMode::kShift32Imm,                                              \
    null)                                                                      \
  V(Word32, Word32And, kS390_And32, And32OperandMode, null)                    \
  V(Word32, Word32Or, kS390_Or32, Or32OperandMode, null)                       \
  V(Word32, Word32Xor, kS390_Xor32, Xor32OperandMode, null)                    \
  V(Word32, Word32Shl, kS390_ShiftLeft32, Shift32OperandMode, null)            \
  V(Word32, Word32Shr, kS390_ShiftRight32, Shift32OperandMode, null)           \
  V(Word32, Word32Sar, kS390_ShiftRightArith32, Shift32OperandMode,            \
    [&]() { return TryMatchSignExtInt16OrInt8FromWord32Sar(this, node); })     \
  V(Word32, Float64InsertLowWord32, kS390_DoubleInsertLowWord32,               \
    OperandMode::kAllowRRR,                                                    \
    [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); })   \
  V(Word32, Float64InsertHighWord32, kS390_DoubleInsertHighWord32,             \
    OperandMode::kAllowRRR,                                                    \
    [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); })
1482
// Unary word64 ops (64-bit targets only; expanded under V8_TARGET_ARCH_S390X).
#define WORD64_UNARY_OP_LIST(V)                                              \
  V(Word64, Word64Popcnt, kS390_Popcnt64, OperandMode::kNone, null)          \
  V(Word64, Word64Clz, kS390_Cntlz64, OperandMode::kNone, null)              \
  V(Word64, TruncateInt64ToInt32, kS390_Int64ToInt32, OperandMode::kNone,    \
    null)                                                                    \
  V(Word64, RoundInt64ToFloat32, kS390_Int64ToFloat32, OperandMode::kNone,   \
    null)                                                                    \
  V(Word64, RoundInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone,    \
    null)                                                                    \
  V(Word64, RoundUint64ToFloat32, kS390_Uint64ToFloat32, OperandMode::kNone, \
    null)                                                                    \
  V(Word64, RoundUint64ToFloat64, kS390_Uint64ToDouble, OperandMode::kNone,  \
    null)                                                                    \
  V(Word64, BitcastInt64ToFloat64, kS390_BitcastInt64ToDouble,               \
    OperandMode::kNone, null)
1498
// Binary word64 ops (64-bit targets only). Word64And/Shl/Shr are not listed
// here; they have dedicated visitors above that fuse rotate-and-mask patterns.
#define WORD64_BIN_OP_LIST(V)                                                  \
  V(Word64, Int64Add, kS390_Add64, AddOperandMode, null)                       \
  V(Word64, Int64Sub, kS390_Sub64, SubOperandMode, ([&]() {                    \
      return TryMatchNegFromSub<Int64BinopMatcher, kS390_Neg64>(this, node);   \
    }))                                                                        \
  V(Word64, Int64AddWithOverflow, kS390_Add64, AddOperandMode,                 \
    ([&]() { return TryMatchInt64AddWithOverflow(this, node); }))              \
  V(Word64, Int64SubWithOverflow, kS390_Sub64, SubOperandMode,                 \
    ([&]() { return TryMatchInt64SubWithOverflow(this, node); }))              \
  V(Word64, Int64Mul, kS390_Mul64, MulOperandMode, ([&]() {                    \
      return TryMatchShiftFromMul<Int64BinopMatcher, kS390_ShiftLeft64>(this,  \
                                                                        node); \
    }))                                                                        \
  V(Word64, Int64Div, kS390_Div64,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Uint64Div, kS390_DivU64,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Int64Mod, kS390_Mod64,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Uint64Mod, kS390_ModU64,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Word64Sar, kS390_ShiftRightArith64, Shift64OperandMode, null)      \
  V(Word64, Word64Ror, kS390_RotRight64, Shift64OperandMode, null)             \
  V(Word64, Word64Or, kS390_Or64, Or64OperandMode, null)                       \
  V(Word64, Word64Xor, kS390_Xor64, Xor64OperandMode, null)
1524
// Generates InstructionSelector::Visit<name>: first runs the custom matcher
// (try_extra), and only falls through to the generic visitor if it declined.
#define DECLARE_UNARY_OP(type, name, op, mode, try_extra) \
  void InstructionSelector::Visit##name(Node* node) {     \
    if (std::function<bool()>(try_extra)()) return;       \
    Visit##type##UnaryOp(this, node, op, mode);           \
  }

#define DECLARE_BIN_OP(type, name, op, mode, try_extra) \
  void InstructionSelector::Visit##name(Node* node) {   \
    if (std::function<bool()>(try_extra)()) return;     \
    Visit##type##BinOp(this, node, op, mode);           \
  }

// Instantiate all visitors from the op lists above.
WORD32_BIN_OP_LIST(DECLARE_BIN_OP);
WORD32_UNARY_OP_LIST(DECLARE_UNARY_OP);
FLOAT_UNARY_OP_LIST(DECLARE_UNARY_OP);
FLOAT_BIN_OP_LIST(DECLARE_BIN_OP);

#if V8_TARGET_ARCH_S390X
WORD64_UNARY_OP_LIST(DECLARE_UNARY_OP)
WORD64_BIN_OP_LIST(DECLARE_BIN_OP)
#endif
1546
// Clean up the op-list helper macros now that all visitors are instantiated.
#undef DECLARE_BIN_OP
#undef DECLARE_UNARY_OP
#undef WORD64_BIN_OP_LIST
#undef WORD64_UNARY_OP_LIST
#undef WORD32_BIN_OP_LIST
#undef WORD32_UNARY_OP_LIST
#undef FLOAT_UNARY_OP_LIST
// Was missing: FLOAT_UNARY_OP_LIST_32 is defined above and would otherwise
// leak past this section.
#undef FLOAT_UNARY_OP_LIST_32
#undef WORD32_UNARY_OP_LIST_32
#undef FLOAT_BIN_OP_LIST
#undef FLOAT_BIN_OP_LIST_32
#undef null
1558
#if V8_TARGET_ARCH_S390X
// Checked float->int64 truncations: the optional second output reports
// whether the conversion succeeded (handled by VisitTryTruncateDouble).
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
}

#endif
1577
// Float64 modulus is lowered to a runtime/C call (MarkAsCall) with fixed
// argument/result registers d1, d2.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
       g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}
1584
// IEEE-754 unary math (sin, cos, log, ...) lowered to a call with the fixed
// d1 argument/result register.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
      ->MarkAsCall();
}

// IEEE-754 binary math (pow, atan2, ...) lowered to a call with fixed
// registers d1 (first arg / result) and d2 (second arg).
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
       g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}
1599
// Round-ties-even is not advertised for this architecture, so these visitors
// must never be reached.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  UNREACHABLE();
}
1607
CompareLogical(FlagsContinuation * cont)1608 static bool CompareLogical(FlagsContinuation* cont) {
1609 switch (cont->condition()) {
1610 case kUnsignedLessThan:
1611 case kUnsignedGreaterThanOrEqual:
1612 case kUnsignedLessThanOrEqual:
1613 case kUnsignedGreaterThan:
1614 return true;
1615 default:
1616 return false;
1617 }
1618 UNREACHABLE();
1619 }
1620
namespace {

// Shared routine for multiple compare operations.
// Emits |opcode| comparing |left| against |right| and attaches the flags
// continuation (branch/set/deopt) to the instruction.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}

// Defined later in this file; forward-declared for use by VisitWordCompare.
void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
                      Node* node, Node* value, FlagsContinuation* cont,
                      bool discard_output = false);
1633
// Shared routine for multiple word compare operations.
// Canonicalizes operand order (immediates right, memory operands left),
// folds compare-against-zero into a load-and-test, and lets the right
// operand be an immediate, a memory operand, or a register.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      OperandModes immediate_mode) {
  S390OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  DCHECK(IrOpcode::IsComparisonOpcode(node->opcode()) ||
         node->opcode() == IrOpcode::kInt32Sub ||
         node->opcode() == IrOpcode::kInt64Sub);

  InstructionOperand inputs[8];
  InstructionOperand outputs[1];
  size_t input_count = 0;
  size_t output_count = 0;

  // If one of the two inputs is an immediate, make sure it's on the right, or
  // if one of the two inputs is a memory operand, make sure it's on the left.
  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    // For branches, memory-operand legality is judged at the branch site.
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }

  if ((!g.CanBeImmediate(right, immediate_mode) &&
       g.CanBeImmediate(left, immediate_mode)) ||
      (!g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
       g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
    // Swapping a non-commutative comparison requires flipping the condition.
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // check if compare with 0
  if (g.CanBeImmediate(right, immediate_mode) && g.GetImmediate(right) == 0) {
    DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64);
    ArchOpcode load_and_test = (opcode == kS390_Cmp32)
                                   ? kS390_LoadAndTestWord32
                                   : kS390_LoadAndTestWord64;
    return VisitLoadAndTest(selector, load_and_test, node, left, cont, true);
  }

  inputs[input_count++] = g.UseRegisterOrStackPointer(left);
  if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
    // generate memory operand
    AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
        right, inputs, &input_count, OpcodeImmMode(opcode));
    opcode |= AddressingModeField::encode(addressing_mode);
  } else if (g.CanBeImmediate(right, immediate_mode)) {
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    inputs[input_count++] = g.UseAnyExceptImmediate(right);
  }

  DCHECK(input_count <= 8 && output_count <= 1);
  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
1692
VisitWord32Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1693 void VisitWord32Compare(InstructionSelector* selector, Node* node,
1694 FlagsContinuation* cont) {
1695 OperandModes mode =
1696 (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
1697 VisitWordCompare(selector, node, kS390_Cmp32, cont, mode);
1698 }
1699
1700 #if V8_TARGET_ARCH_S390X
VisitWord64Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1701 void VisitWord64Compare(InstructionSelector* selector, Node* node,
1702 FlagsContinuation* cont) {
1703 OperandModes mode =
1704 (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
1705 VisitWordCompare(selector, node, kS390_Cmp64, cont, mode);
1706 }
1707 #endif
1708
// Shared routine for multiple float32 compare operations.
// No immediate mode: FP compares always take register/memory operands.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kS390_CmpFloat, cont, OperandMode::kNone);
}
1714
// Shared routine for multiple float64 compare operations.
// No immediate mode: FP compares always take register/memory operands.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kS390_CmpDouble, cont, OperandMode::kNone);
}
1720
VisitTestUnderMask(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1721 void VisitTestUnderMask(InstructionSelector* selector, Node* node,
1722 FlagsContinuation* cont) {
1723 DCHECK(node->opcode() == IrOpcode::kWord32And ||
1724 node->opcode() == IrOpcode::kWord64And);
1725 ArchOpcode opcode =
1726 (node->opcode() == IrOpcode::kWord32And) ? kS390_Tst32 : kS390_Tst64;
1727 S390OperandGenerator g(selector);
1728 Node* left = node->InputAt(0);
1729 Node* right = node->InputAt(1);
1730 if (!g.CanBeImmediate(right, OperandMode::kUint32Imm) &&
1731 g.CanBeImmediate(left, OperandMode::kUint32Imm)) {
1732 std::swap(left, right);
1733 }
1734 VisitCompare(selector, opcode, g.UseRegister(left),
1735 g.UseOperand(right, OperandMode::kUint32Imm), cont);
1736 }
1737
VisitLoadAndTest(InstructionSelector * selector,InstructionCode opcode,Node * node,Node * value,FlagsContinuation * cont,bool discard_output)1738 void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
1739 Node* node, Node* value, FlagsContinuation* cont,
1740 bool discard_output) {
1741 static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3,
1742 "LoadAndTest Opcode shouldn't contain other opcodes.");
1743
1744 // TODO(john.yan): Add support for Float32/Float64.
1745 DCHECK(opcode >= kS390_LoadAndTestWord32 ||
1746 opcode <= kS390_LoadAndTestWord64);
1747
1748 S390OperandGenerator g(selector);
1749 InstructionOperand inputs[8];
1750 InstructionOperand outputs[2];
1751 size_t input_count = 0;
1752 size_t output_count = 0;
1753 bool use_value = false;
1754
1755 int effect_level = selector->GetEffectLevel(node);
1756 if (cont->IsBranch()) {
1757 effect_level = selector->GetEffectLevel(
1758 cont->true_block()->PredecessorAt(0)->control_input());
1759 }
1760
1761 if (g.CanBeMemoryOperand(opcode, node, value, effect_level)) {
1762 // generate memory operand
1763 AddressingMode addressing_mode =
1764 g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
1765 opcode |= AddressingModeField::encode(addressing_mode);
1766 } else {
1767 inputs[input_count++] = g.UseAnyExceptImmediate(value);
1768 use_value = true;
1769 }
1770
1771 if (!discard_output && !use_value) {
1772 outputs[output_count++] = g.DefineAsRegister(value);
1773 }
1774
1775 DCHECK(input_count <= 8 && output_count <= 2);
1776 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
1777 inputs, cont);
1778 }
1779
1780 } // namespace
1781
// Shared routine for word comparisons against zero.
// Tries to fold the zero-comparison into the instruction that produces
// |value| (compare, test-under-mask, overflow projection, logical op, or a
// load), falling back to an explicit load-and-test when nothing combines.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  // Several cases below only combine when the condition is (in)equality.
  FlagsCondition fc = cont->condition();
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          // Note: intentionally shadows the outer |user|/|value|.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt32Sub:
                return VisitWord32Compare(this, value, cont);
              case IrOpcode::kWord32And:
                return VisitTestUnderMask(this, value, cont);
              default:
                break;
            }
          }
        }
        return VisitWord32Compare(this, value, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
#if V8_TARGET_ARCH_S390X
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          // Note: intentionally shadows the outer |user|/|value|.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt64Sub:
                return VisitWord64Compare(this, value, cont);
              case IrOpcode::kWord64And:
                return VisitTestUnderMask(this, value, cont);
              default:
                break;
            }
          }
        }
        return VisitWord64Compare(this, value, cont);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
#endif
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord32BinOp(this, node, kS390_Add32, AddOperandMode,
                                        cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord32BinOp(this, node, kS390_Sub32, SubOperandMode,
                                        cont);
              case IrOpcode::kInt32MulWithOverflow:
                if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
                  cont->OverwriteAndNegateIfEqual(kOverflow);
                  return VisitWord32BinOp(
                      this, node, kS390_Mul32,
                      OperandMode::kAllowRRR | OperandMode::kAllowRM, cont);
                } else {
                  // Without MISC_INSTR_EXT2 the overflow check is emulated and
                  // signalled via (in)equality instead of the overflow flag.
                  cont->OverwriteAndNegateIfEqual(kNotEqual);
                  return VisitWord32BinOp(
                      this, node, kS390_Mul32WithOverflow,
                      OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
                      cont);
                }
              case IrOpcode::kInt32AbsWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord32UnaryOp(this, node, kS390_Abs32,
                                          OperandMode::kNone, cont);
#if V8_TARGET_ARCH_S390X
              case IrOpcode::kInt64AbsWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord64UnaryOp(this, node, kS390_Abs64,
                                          OperandMode::kNone, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord64BinOp(this, node, kS390_Add64, AddOperandMode,
                                        cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord64BinOp(this, node, kS390_Sub64, SubOperandMode,
                                        cont);
#endif
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord32Compare(this, value, cont);
        break;
      case IrOpcode::kWord32And:
        return VisitTestUnderMask(this, value, cont);
      case IrOpcode::kLoad: {
        LoadRepresentation load_rep = LoadRepresentationOf(value->op());
        switch (load_rep.representation()) {
          case MachineRepresentation::kWord32:
            return VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value,
                                    cont);
          default:
            break;
        }
        break;
      }
      case IrOpcode::kInt32Add:
        // can't handle overflow case.
        break;
      case IrOpcode::kWord32Or:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord32BinOp(this, value, kS390_Or32, Or32OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord32Xor:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord32BinOp(this, value, kS390_Xor32, Xor32OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord32Sar:
      case IrOpcode::kWord32Shl:
      case IrOpcode::kWord32Shr:
      case IrOpcode::kWord32Ror:
        // doesn't generate cc, so ignore.
        break;
#if V8_TARGET_ARCH_S390X
      case IrOpcode::kInt64Sub:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord64Compare(this, value, cont);
        break;
      case IrOpcode::kWord64And:
        return VisitTestUnderMask(this, value, cont);
      case IrOpcode::kInt64Add:
        // can't handle overflow case.
        break;
      case IrOpcode::kWord64Or:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord64BinOp(this, value, kS390_Or64, Or64OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord64Xor:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord64BinOp(this, value, kS390_Xor64, Xor64OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shl:
      case IrOpcode::kWord64Shr:
      case IrOpcode::kWord64Ror:
        // doesn't generate cc, so ignore
        break;
#endif
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit LoadAndTest
  VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, cont, true);
}
2011
// Selects either a jump table or a binary-search tree for a Switch node,
// based on a simple space/time cost model.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  S390OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    // Use the table only when its weighted cost beats the lookup's, the
    // minimum value cannot underflow when biased, and the range is bounded.
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        // Bias the switch value so the table index is zero-based.
        index_operand = g.TempRegister();
        Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
             value_operand, g.TempImmediate(-sw.min_value()));
      }
#if V8_TARGET_ARCH_S390X
      // Zero-extend the 32-bit index before it is used for 64-bit addressing.
      InstructionOperand index_operand_zero_ext = g.TempRegister();
      Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
      index_operand = index_operand_zero_ext;
#endif
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}
2047
VisitWord32Equal(Node * const node)2048 void InstructionSelector::VisitWord32Equal(Node* const node) {
2049 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2050 Int32BinopMatcher m(node);
2051 if (m.right().Is(0)) {
2052 return VisitLoadAndTest(this, kS390_LoadAndTestWord32, m.node(),
2053 m.left().node(), &cont, true);
2054 }
2055 VisitWord32Compare(this, node, &cont);
2056 }
2057
// Set-variant 32-bit comparisons: each materializes its condition as a
// boolean via a ForSet continuation and the shared 32-bit compare.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2079
2080 #if V8_TARGET_ARCH_S390X
// Set-variant 64-bit comparisons (S390X only). Word64Equal folds a
// comparison against zero into a load-and-test, mirroring the 32-bit case.
void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitLoadAndTest(this, kS390_LoadAndTestWord64, m.node(),
                            m.left().node(), &cont, true);
  }
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
2112 #endif
2113
// Set-variant floating-point comparisons. Note the less-than variants map
// onto the unsigned condition codes — this follows the shared FlagsCondition
// convention for FP compares (unordered results fall on the unsigned side).
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
2145
// Emits the instructions that place call arguments into their stack slots.
// C calls poke arguments into a pre-sized area; other calls claim the space
// first and then store each argument.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  S390OperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    // Arguments occupy consecutive slots starting after the frame's extra
    // parameter slot.
    // NOTE(review): unlike the branch below, input.node is not checked for
    // nullptr here — this assumes C calls never contain alignment holes;
    // confirm against the callers.
    int slot = kStackFrameExtraParamSlot;
    for (PushParameter input : (*arguments)) {
      Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot));
      ++slot;
    }
  } else {
    // Push any stack arguments.
    int num_slots = 0;
    int slot = 0;

    // First pass: count slots. Float64 arguments take
    // kDoubleSize / kPointerSize slots, everything else one slot.
    for (PushParameter input : *arguments) {
      if (input.node == nullptr) continue;
      num_slots += input.location.GetType().representation() ==
                           MachineRepresentation::kFloat64
                       ? kDoubleSize / kPointerSize
                       : 1;
    }
    // Claim all the stack space in one go, then store each argument.
    Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
    for (PushParameter input : *arguments) {
      // Skip any alignment holes in pushed nodes.
      if (input.node) {
        Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(slot));
        slot += input.location.GetType().representation() ==
                        MachineRepresentation::kFloat64
                    ? (kDoubleSize / kPointerSize)
                    : 1;
      }
    }
    DCHECK(num_slots == slot);
  }
}
2191
// Tail-call targets are not emitted as immediates on this port.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

// Number of temp registers reserved for tail calls from JS functions.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
2195
VisitWord32AtomicLoad(Node * node)2196 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
2197 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2198 S390OperandGenerator g(this);
2199 Node* base = node->InputAt(0);
2200 Node* index = node->InputAt(1);
2201 ArchOpcode opcode = kArchNop;
2202 switch (load_rep.representation()) {
2203 case MachineRepresentation::kWord8:
2204 opcode =
2205 load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
2206 break;
2207 case MachineRepresentation::kWord16:
2208 opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
2209 : kWord32AtomicLoadUint16;
2210 break;
2211 case MachineRepresentation::kWord32:
2212 opcode = kWord32AtomicLoadWord32;
2213 break;
2214 default:
2215 UNREACHABLE();
2216 return;
2217 }
2218 Emit(opcode | AddressingModeField::encode(kMode_MRR),
2219 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
2220 }
2221
VisitWord32AtomicStore(Node * node)2222 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
2223 MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2224 S390OperandGenerator g(this);
2225 Node* base = node->InputAt(0);
2226 Node* index = node->InputAt(1);
2227 Node* value = node->InputAt(2);
2228 ArchOpcode opcode = kArchNop;
2229 switch (rep) {
2230 case MachineRepresentation::kWord8:
2231 opcode = kWord32AtomicStoreWord8;
2232 break;
2233 case MachineRepresentation::kWord16:
2234 opcode = kWord32AtomicStoreWord16;
2235 break;
2236 case MachineRepresentation::kWord32:
2237 opcode = kWord32AtomicStoreWord32;
2238 break;
2239 default:
2240 UNREACHABLE();
2241 return;
2242 }
2243
2244 InstructionOperand inputs[4];
2245 size_t input_count = 0;
2246 inputs[input_count++] = g.UseUniqueRegister(value);
2247 inputs[input_count++] = g.UseUniqueRegister(base);
2248 inputs[input_count++] = g.UseUniqueRegister(index);
2249 Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
2250 inputs);
2251 }
2252
VisitWord32AtomicExchange(Node * node)2253 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
2254 S390OperandGenerator g(this);
2255 Node* base = node->InputAt(0);
2256 Node* index = node->InputAt(1);
2257 Node* value = node->InputAt(2);
2258 ArchOpcode opcode = kArchNop;
2259 MachineType type = AtomicOpType(node->op());
2260 if (type == MachineType::Int8()) {
2261 opcode = kWord32AtomicExchangeInt8;
2262 } else if (type == MachineType::Uint8()) {
2263 opcode = kWord32AtomicExchangeUint8;
2264 } else if (type == MachineType::Int16()) {
2265 opcode = kWord32AtomicExchangeInt16;
2266 } else if (type == MachineType::Uint16()) {
2267 opcode = kWord32AtomicExchangeUint16;
2268 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2269 opcode = kWord32AtomicExchangeWord32;
2270 } else {
2271 UNREACHABLE();
2272 return;
2273 }
2274
2275 AddressingMode addressing_mode = kMode_MRR;
2276 InstructionOperand inputs[3];
2277 size_t input_count = 0;
2278 inputs[input_count++] = g.UseUniqueRegister(base);
2279 inputs[input_count++] = g.UseUniqueRegister(index);
2280 inputs[input_count++] = g.UseUniqueRegister(value);
2281 InstructionOperand outputs[1];
2282 outputs[0] = g.DefineAsRegister(node);
2283 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2284 Emit(code, 1, outputs, input_count, inputs);
2285 }
2286
VisitWord32AtomicCompareExchange(Node * node)2287 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
2288 S390OperandGenerator g(this);
2289 Node* base = node->InputAt(0);
2290 Node* index = node->InputAt(1);
2291 Node* old_value = node->InputAt(2);
2292 Node* new_value = node->InputAt(3);
2293
2294 MachineType type = AtomicOpType(node->op());
2295 ArchOpcode opcode = kArchNop;
2296 if (type == MachineType::Int8()) {
2297 opcode = kWord32AtomicCompareExchangeInt8;
2298 } else if (type == MachineType::Uint8()) {
2299 opcode = kWord32AtomicCompareExchangeUint8;
2300 } else if (type == MachineType::Int16()) {
2301 opcode = kWord32AtomicCompareExchangeInt16;
2302 } else if (type == MachineType::Uint16()) {
2303 opcode = kWord32AtomicCompareExchangeUint16;
2304 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2305 opcode = kWord32AtomicCompareExchangeWord32;
2306 } else {
2307 UNREACHABLE();
2308 return;
2309 }
2310
2311 InstructionOperand inputs[4];
2312 size_t input_count = 0;
2313 inputs[input_count++] = g.UseUniqueRegister(old_value);
2314 inputs[input_count++] = g.UseUniqueRegister(new_value);
2315 inputs[input_count++] = g.UseUniqueRegister(base);
2316
2317 AddressingMode addressing_mode;
2318 if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
2319 inputs[input_count++] = g.UseImmediate(index);
2320 addressing_mode = kMode_MRI;
2321 } else {
2322 inputs[input_count++] = g.UseUniqueRegister(index);
2323 addressing_mode = kMode_MRR;
2324 }
2325
2326 InstructionOperand outputs[1];
2327 size_t output_count = 0;
2328 outputs[output_count++] = g.DefineSameAsFirst(node);
2329
2330 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2331 Emit(code, output_count, outputs, input_count, inputs);
2332 }
2333
VisitWord32AtomicBinaryOperation(Node * node,ArchOpcode int8_op,ArchOpcode uint8_op,ArchOpcode int16_op,ArchOpcode uint16_op,ArchOpcode word32_op)2334 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2335 Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2336 ArchOpcode uint16_op, ArchOpcode word32_op) {
2337 S390OperandGenerator g(this);
2338 Node* base = node->InputAt(0);
2339 Node* index = node->InputAt(1);
2340 Node* value = node->InputAt(2);
2341
2342 MachineType type = AtomicOpType(node->op());
2343 ArchOpcode opcode = kArchNop;
2344
2345 if (type == MachineType::Int8()) {
2346 opcode = int8_op;
2347 } else if (type == MachineType::Uint8()) {
2348 opcode = uint8_op;
2349 } else if (type == MachineType::Int16()) {
2350 opcode = int16_op;
2351 } else if (type == MachineType::Uint16()) {
2352 opcode = uint16_op;
2353 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2354 opcode = word32_op;
2355 } else {
2356 UNREACHABLE();
2357 return;
2358 }
2359
2360 InstructionOperand inputs[3];
2361 size_t input_count = 0;
2362 inputs[input_count++] = g.UseUniqueRegister(base);
2363
2364 AddressingMode addressing_mode;
2365 if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
2366 inputs[input_count++] = g.UseImmediate(index);
2367 addressing_mode = kMode_MRI;
2368 } else {
2369 inputs[input_count++] = g.UseUniqueRegister(index);
2370 addressing_mode = kMode_MRR;
2371 }
2372
2373 inputs[input_count++] = g.UseUniqueRegister(value);
2374
2375 InstructionOperand outputs[1];
2376 size_t output_count = 0;
2377 outputs[output_count++] = g.DefineAsRegister(node);
2378
2379 InstructionOperand temps[1];
2380 size_t temp_count = 0;
2381 temps[temp_count++] = g.TempRegister();
2382
2383 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2384 Emit(code, output_count, outputs, input_count, inputs, temp_count, temps);
2385 }
2386
// Expands to the five Word32 atomic binop visitors; each dispatches all of
// its representation variants to VisitWord32AtomicBinaryOperation above.
#define VISIT_ATOMIC_BINOP(op)                                     \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {    \
    VisitWord32AtomicBinaryOperation(                              \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8,   \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,       \
        kWord32Atomic##op##Word32);                                \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
2400
// SIMD (I32x4/I16x8) visitors: not implemented on this port yet.
void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
2490
VisitI16x8GeS(Node * node)2491 void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
2492
VisitI16x8GtU(Node * node)2493 void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
2494
VisitI16x8GeU(Node * node)2495 void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
2496
VisitI8x16Neg(Node * node)2497 void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
2498
VisitI8x16Splat(Node * node)2499 void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
2500
VisitI8x16ExtractLane(Node * node)2501 void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
2502
VisitI8x16ReplaceLane(Node * node)2503 void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
2504
VisitI8x16Add(Node * node)2505 void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
2506
VisitI8x16AddSaturateS(Node * node)2507 void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
2508 UNIMPLEMENTED();
2509 }
2510
VisitI8x16Sub(Node * node)2511 void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
2512
VisitI8x16SubSaturateS(Node * node)2513 void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
2514 UNIMPLEMENTED();
2515 }
2516
VisitI8x16MinS(Node * node)2517 void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
2518
VisitI8x16MaxS(Node * node)2519 void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
2520
VisitI8x16Eq(Node * node)2521 void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
2522
VisitI8x16Ne(Node * node)2523 void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
2524
VisitI8x16GtS(Node * node)2525 void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
2526
VisitI8x16GeS(Node * node)2527 void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
2528
VisitI8x16AddSaturateU(Node * node)2529 void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
2530 UNIMPLEMENTED();
2531 }
2532
VisitI8x16SubSaturateU(Node * node)2533 void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
2534 UNIMPLEMENTED();
2535 }
2536
VisitI8x16MinU(Node * node)2537 void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
2538
VisitI8x16MaxU(Node * node)2539 void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
2540
VisitI8x16GtU(Node * node)2541 void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
2542
VisitI8x16GeU(Node * node)2543 void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
2544
VisitS128And(Node * node)2545 void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
2546
VisitS128Or(Node * node)2547 void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
2548
VisitS128Xor(Node * node)2549 void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
2550
VisitS128Not(Node * node)2551 void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
2552
VisitS128Zero(Node * node)2553 void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
2554
VisitF32x4Eq(Node * node)2555 void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
2556
VisitF32x4Ne(Node * node)2557 void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
2558
VisitF32x4Lt(Node * node)2559 void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
2560
VisitF32x4Le(Node * node)2561 void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
2562
VisitF32x4Splat(Node * node)2563 void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
2564
VisitF32x4ExtractLane(Node * node)2565 void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
2566
VisitF32x4ReplaceLane(Node * node)2567 void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
2568
EmitPrepareResults(ZoneVector<PushParameter> * results,const CallDescriptor * call_descriptor,Node * node)2569 void InstructionSelector::EmitPrepareResults(
2570 ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
2571 Node* node) {
2572 // TODO(John): Port.
2573 }
2574
VisitF32x4Add(Node * node)2575 void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
2576
VisitF32x4Sub(Node * node)2577 void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
2578
VisitF32x4Mul(Node * node)2579 void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
2580
VisitF32x4Min(Node * node)2581 void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
2582
VisitF32x4Max(Node * node)2583 void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
2584
VisitS128Select(Node * node)2585 void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
2586
VisitF32x4Neg(Node * node)2587 void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
2588
VisitF32x4Abs(Node * node)2589 void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
2590
VisitF32x4RecipSqrtApprox(Node * node)2591 void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
2592 UNIMPLEMENTED();
2593 }
2594
VisitF32x4RecipApprox(Node * node)2595 void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
2596
VisitF32x4AddHoriz(Node * node)2597 void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
VisitI32x4AddHoriz(Node * node)2598 void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
VisitI16x8AddHoriz(Node * node)2599 void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
2600
2601 // static
2602 MachineOperatorBuilder::Flags
SupportedMachineOperatorFlags()2603 InstructionSelector::SupportedMachineOperatorFlags() {
2604 return MachineOperatorBuilder::kFloat32RoundDown |
2605 MachineOperatorBuilder::kFloat64RoundDown |
2606 MachineOperatorBuilder::kFloat32RoundUp |
2607 MachineOperatorBuilder::kFloat64RoundUp |
2608 MachineOperatorBuilder::kFloat32RoundTruncate |
2609 MachineOperatorBuilder::kFloat64RoundTruncate |
2610 MachineOperatorBuilder::kFloat64RoundTiesAway |
2611 MachineOperatorBuilder::kWord32Popcnt |
2612 MachineOperatorBuilder::kInt32AbsWithOverflow |
2613 MachineOperatorBuilder::kInt64AbsWithOverflow |
2614 MachineOperatorBuilder::kWord64Popcnt;
2615 }
2616
2617 // static
2618 MachineOperatorBuilder::AlignmentRequirements
AlignmentRequirements()2619 InstructionSelector::AlignmentRequirements() {
2620 return MachineOperatorBuilder::AlignmentRequirements::
2621 FullUnalignedAccessSupport();
2622 }
2623
2624 } // namespace compiler
2625 } // namespace internal
2626 } // namespace v8
2627