1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/base/adapters.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
9 #include "src/s390/frames-s390.h"
10
11 namespace v8 {
12 namespace internal {
13 namespace compiler {
14
// Classes of immediate operand that S390 instructions accept. Used by
// S390OperandGenerator::CanBeImmediate to decide whether a constant node can
// be encoded directly into the instruction or must live in a register.
enum ImmediateMode {
  kShift32Imm,       // shift amount in [0, 32)
  kShift64Imm,       // shift amount in [0, 64)
  kInt32Imm,         // signed 32-bit immediate
  kInt32Imm_Negate,  // value whose negation fits a signed 32-bit immediate
  kUint32Imm,        // unsigned 32-bit immediate
  kInt20Imm,         // signed 20-bit displacement immediate
  kNoImmediate       // no immediate form; always use a register
};
24
25 // Adds S390-specific methods for generating operands.
26 class S390OperandGenerator final : public OperandGenerator {
27 public:
S390OperandGenerator(InstructionSelector * selector)28 explicit S390OperandGenerator(InstructionSelector* selector)
29 : OperandGenerator(selector) {}
30
UseOperand(Node * node,ImmediateMode mode)31 InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
32 if (CanBeImmediate(node, mode)) {
33 return UseImmediate(node);
34 }
35 return UseRegister(node);
36 }
37
GetImmediate(Node * node)38 int64_t GetImmediate(Node* node) {
39 if (node->opcode() == IrOpcode::kInt32Constant)
40 return OpParameter<int32_t>(node);
41 else if (node->opcode() == IrOpcode::kInt64Constant)
42 return OpParameter<int64_t>(node);
43 else
44 UNIMPLEMENTED();
45 return 0L;
46 }
47
CanBeImmediate(Node * node,ImmediateMode mode)48 bool CanBeImmediate(Node* node, ImmediateMode mode) {
49 int64_t value;
50 if (node->opcode() == IrOpcode::kInt32Constant)
51 value = OpParameter<int32_t>(node);
52 else if (node->opcode() == IrOpcode::kInt64Constant)
53 value = OpParameter<int64_t>(node);
54 else
55 return false;
56 return CanBeImmediate(value, mode);
57 }
58
CanBeImmediate(int64_t value,ImmediateMode mode)59 bool CanBeImmediate(int64_t value, ImmediateMode mode) {
60 switch (mode) {
61 case kShift32Imm:
62 return 0 <= value && value < 32;
63 case kShift64Imm:
64 return 0 <= value && value < 64;
65 case kInt32Imm:
66 return is_int32(value);
67 case kInt32Imm_Negate:
68 return is_int32(-value);
69 case kUint32Imm:
70 return is_uint32(value);
71 case kInt20Imm:
72 return is_int20(value);
73 case kNoImmediate:
74 return false;
75 }
76 return false;
77 }
78
GenerateMemoryOperandInputs(Node * index,Node * base,Node * displacement,DisplacementMode displacement_mode,InstructionOperand inputs[],size_t * input_count)79 AddressingMode GenerateMemoryOperandInputs(Node* index, Node* base,
80 Node* displacement,
81 DisplacementMode displacement_mode,
82 InstructionOperand inputs[],
83 size_t* input_count) {
84 AddressingMode mode = kMode_MRI;
85 if (base != nullptr) {
86 inputs[(*input_count)++] = UseRegister(base);
87 if (index != nullptr) {
88 inputs[(*input_count)++] = UseRegister(index);
89 if (displacement != nullptr) {
90 inputs[(*input_count)++] = displacement_mode
91 ? UseNegatedImmediate(displacement)
92 : UseImmediate(displacement);
93 mode = kMode_MRRI;
94 } else {
95 mode = kMode_MRR;
96 }
97 } else {
98 if (displacement == nullptr) {
99 mode = kMode_MR;
100 } else {
101 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
102 ? UseNegatedImmediate(displacement)
103 : UseImmediate(displacement);
104 mode = kMode_MRI;
105 }
106 }
107 } else {
108 DCHECK_NOT_NULL(index);
109 inputs[(*input_count)++] = UseRegister(index);
110 if (displacement != nullptr) {
111 inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
112 ? UseNegatedImmediate(displacement)
113 : UseImmediate(displacement);
114 mode = kMode_MRI;
115 } else {
116 mode = kMode_MR;
117 }
118 }
119 return mode;
120 }
121
GetEffectiveAddressMemoryOperand(Node * operand,InstructionOperand inputs[],size_t * input_count)122 AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
123 InstructionOperand inputs[],
124 size_t* input_count) {
125 #if V8_TARGET_ARCH_S390X
126 BaseWithIndexAndDisplacement64Matcher m(operand,
127 AddressOption::kAllowInputSwap);
128 #else
129 BaseWithIndexAndDisplacement32Matcher m(operand,
130 AddressOption::kAllowInputSwap);
131 #endif
132 DCHECK(m.matches());
133 if ((m.displacement() == nullptr ||
134 CanBeImmediate(m.displacement(), kInt20Imm))) {
135 DCHECK(m.scale() == 0);
136 return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
137 m.displacement_mode(), inputs,
138 input_count);
139 } else {
140 inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
141 inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
142 return kMode_MRR;
143 }
144 }
145
CanBeBetterLeftOperand(Node * node) const146 bool CanBeBetterLeftOperand(Node* node) const {
147 return !selector()->IsLive(node);
148 }
149
GetRepresentation(Node * node)150 MachineRepresentation GetRepresentation(Node* node) {
151 return sequence()->GetRepresentation(selector()->GetVirtualRegister(node));
152 }
153
Is64BitOperand(Node * node)154 bool Is64BitOperand(Node* node) {
155 return MachineRepresentation::kWord64 == GetRepresentation(node);
156 }
157 };
158
159 namespace {
160
VisitRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)161 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
162 S390OperandGenerator g(selector);
163 selector->Emit(opcode, g.DefineAsRegister(node),
164 g.UseRegister(node->InputAt(0)));
165 }
166
VisitRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)167 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
168 S390OperandGenerator g(selector);
169 selector->Emit(opcode, g.DefineAsRegister(node),
170 g.UseRegister(node->InputAt(0)),
171 g.UseRegister(node->InputAt(1)));
172 }
173
VisitRRO(InstructionSelector * selector,ArchOpcode opcode,Node * node,ImmediateMode operand_mode)174 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
175 ImmediateMode operand_mode) {
176 S390OperandGenerator g(selector);
177 selector->Emit(opcode, g.DefineAsRegister(node),
178 g.UseRegister(node->InputAt(0)),
179 g.UseOperand(node->InputAt(1), operand_mode));
180 }
181
182 #if V8_TARGET_ARCH_S390X
VisitTryTruncateDouble(InstructionSelector * selector,ArchOpcode opcode,Node * node)183 void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
184 Node* node) {
185 S390OperandGenerator g(selector);
186 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
187 InstructionOperand outputs[2];
188 size_t output_count = 0;
189 outputs[output_count++] = g.DefineAsRegister(node);
190
191 Node* success_output = NodeProperties::FindProjection(node, 1);
192 if (success_output) {
193 outputs[output_count++] = g.DefineAsRegister(success_output);
194 }
195
196 selector->Emit(opcode, output_count, outputs, 1, inputs);
197 }
198 #endif
199
// Shared routine for multiple binary operations.
//
// Assembles register/immediate inputs for a binary op matched by |Matcher|,
// attaches the operands required by the flags continuation |cont| (branch
// targets, deopt state, or a boolean result), and emits the instruction.
// NOTE: the order in which |inputs|/|outputs| are filled is part of the
// instruction's operand layout and must not be changed.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, ImmediateMode operand_mode,
                FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  Matcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right, operand_mode)) {
    // Right operand fits the instruction's immediate field.
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    // For commutative ops, prefer a dead node on the left so its register can
    // be reused for the result.
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseRegister(right);
  }

  // A branch continuation consumes its targets as extra inputs.
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }
  // A "set" continuation materializes the condition as a boolean output.
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
268
269 // Shared routine for multiple binary operations.
270 template <typename Matcher>
VisitBinop(InstructionSelector * selector,Node * node,ArchOpcode opcode,ImmediateMode operand_mode)271 void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
272 ImmediateMode operand_mode) {
273 FlagsContinuation cont;
274 VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
275 }
276
277 } // namespace
278
VisitLoad(Node * node)279 void InstructionSelector::VisitLoad(Node* node) {
280 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
281 S390OperandGenerator g(this);
282 ArchOpcode opcode = kArchNop;
283 switch (load_rep.representation()) {
284 case MachineRepresentation::kFloat32:
285 opcode = kS390_LoadFloat32;
286 break;
287 case MachineRepresentation::kFloat64:
288 opcode = kS390_LoadDouble;
289 break;
290 case MachineRepresentation::kBit: // Fall through.
291 case MachineRepresentation::kWord8:
292 opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
293 break;
294 case MachineRepresentation::kWord16:
295 opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
296 break;
297 #if !V8_TARGET_ARCH_S390X
298 case MachineRepresentation::kTaggedSigned: // Fall through.
299 case MachineRepresentation::kTaggedPointer: // Fall through.
300 case MachineRepresentation::kTagged: // Fall through.
301 #endif
302 case MachineRepresentation::kWord32:
303 opcode = kS390_LoadWordU32;
304 break;
305 #if V8_TARGET_ARCH_S390X
306 case MachineRepresentation::kTaggedSigned: // Fall through.
307 case MachineRepresentation::kTaggedPointer: // Fall through.
308 case MachineRepresentation::kTagged: // Fall through.
309 case MachineRepresentation::kWord64:
310 opcode = kS390_LoadWord64;
311 break;
312 #else
313 case MachineRepresentation::kWord64: // Fall through.
314 #endif
315 case MachineRepresentation::kSimd128: // Fall through.
316 case MachineRepresentation::kNone:
317 UNREACHABLE();
318 return;
319 }
320 InstructionOperand outputs[1];
321 outputs[0] = g.DefineAsRegister(node);
322 InstructionOperand inputs[3];
323 size_t input_count = 0;
324 AddressingMode mode =
325 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
326 InstructionCode code = opcode | AddressingModeField::encode(mode);
327 Emit(code, 1, outputs, input_count, inputs);
328 }
329
// Protected (trap-handler) loads are not implemented for S390 yet.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
334
// Emits a store. Tagged stores that need a write barrier go through the
// generic kArchStoreWithWriteBarrier path with unique registers (the
// out-of-line record-write code reuses them); all other stores select a plain
// S390 store opcode, fusing byte-reversal inputs where possible.
void InstructionSelector::VisitStore(Node* node) {
  S390OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
    // for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(offset, kInt20Imm)) {
      inputs[input_count++] = g.UseImmediate(offset);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(offset);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    // Map the barrier kind onto the record-write mode checked out of line.
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    NodeMatcher m(value);
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kS390_StoreFloat32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kS390_StoreDouble;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kS390_StoreWord8;
        break;
      case MachineRepresentation::kWord16:
        opcode = kS390_StoreWord16;
        break;
#if !V8_TARGET_ARCH_S390X
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:  // Fall through.
#endif
      case MachineRepresentation::kWord32:
        opcode = kS390_StoreWord32;
        // Fuse a byte-reversal of the value into a reversing store.
        if (m.IsWord32ReverseBytes()) {
          opcode = kS390_StoreReverse32;
          value = value->InputAt(0);
        }
        break;
#if V8_TARGET_ARCH_S390X
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kS390_StoreWord64;
        // Fuse a byte-reversal of the value into a reversing store.
        if (m.IsWord64ReverseBytes()) {
          opcode = kS390_StoreReverse64;
          value = value->InputAt(0);
        }
        break;
#else
      case MachineRepresentation::kWord64:  // Fall through.
#endif
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }
    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    InstructionOperand value_operand = g.UseRegister(value);
    inputs[input_count++] = value_operand;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}
442
// Architecture supports unaligned access, therefore VisitLoad is used instead.
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
445
// Architecture supports unaligned access, therefore VisitStore is used instead.
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
448
VisitCheckedLoad(Node * node)449 void InstructionSelector::VisitCheckedLoad(Node* node) {
450 CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
451 S390OperandGenerator g(this);
452 Node* const base = node->InputAt(0);
453 Node* const offset = node->InputAt(1);
454 Node* const length = node->InputAt(2);
455 ArchOpcode opcode = kArchNop;
456 switch (load_rep.representation()) {
457 case MachineRepresentation::kWord8:
458 opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
459 break;
460 case MachineRepresentation::kWord16:
461 opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
462 break;
463 case MachineRepresentation::kWord32:
464 opcode = kCheckedLoadWord32;
465 break;
466 #if V8_TARGET_ARCH_S390X
467 case MachineRepresentation::kWord64:
468 opcode = kCheckedLoadWord64;
469 break;
470 #endif
471 case MachineRepresentation::kFloat32:
472 opcode = kCheckedLoadFloat32;
473 break;
474 case MachineRepresentation::kFloat64:
475 opcode = kCheckedLoadFloat64;
476 break;
477 case MachineRepresentation::kBit: // Fall through.
478 case MachineRepresentation::kTaggedSigned: // Fall through.
479 case MachineRepresentation::kTaggedPointer: // Fall through.
480 case MachineRepresentation::kTagged: // Fall through.
481 #if !V8_TARGET_ARCH_S390X
482 case MachineRepresentation::kWord64: // Fall through.
483 #endif
484 case MachineRepresentation::kSimd128: // Fall through.
485 case MachineRepresentation::kNone:
486 UNREACHABLE();
487 return;
488 }
489 AddressingMode addressingMode = kMode_MRR;
490 Emit(opcode | AddressingModeField::encode(addressingMode),
491 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
492 g.UseOperand(length, kUint32Imm));
493 }
494
VisitCheckedStore(Node * node)495 void InstructionSelector::VisitCheckedStore(Node* node) {
496 MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
497 S390OperandGenerator g(this);
498 Node* const base = node->InputAt(0);
499 Node* const offset = node->InputAt(1);
500 Node* const length = node->InputAt(2);
501 Node* const value = node->InputAt(3);
502 ArchOpcode opcode = kArchNop;
503 switch (rep) {
504 case MachineRepresentation::kWord8:
505 opcode = kCheckedStoreWord8;
506 break;
507 case MachineRepresentation::kWord16:
508 opcode = kCheckedStoreWord16;
509 break;
510 case MachineRepresentation::kWord32:
511 opcode = kCheckedStoreWord32;
512 break;
513 #if V8_TARGET_ARCH_S390X
514 case MachineRepresentation::kWord64:
515 opcode = kCheckedStoreWord64;
516 break;
517 #endif
518 case MachineRepresentation::kFloat32:
519 opcode = kCheckedStoreFloat32;
520 break;
521 case MachineRepresentation::kFloat64:
522 opcode = kCheckedStoreFloat64;
523 break;
524 case MachineRepresentation::kBit: // Fall through.
525 case MachineRepresentation::kTaggedSigned: // Fall through.
526 case MachineRepresentation::kTaggedPointer: // Fall through.
527 case MachineRepresentation::kTagged: // Fall through.
528 #if !V8_TARGET_ARCH_S390X
529 case MachineRepresentation::kWord64: // Fall through.
530 #endif
531 case MachineRepresentation::kSimd128: // Fall through.
532 case MachineRepresentation::kNone:
533 UNREACHABLE();
534 return;
535 }
536 AddressingMode addressingMode = kMode_MRR;
537 Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
538 g.UseRegister(base), g.UseRegister(offset),
539 g.UseOperand(length, kUint32Imm), g.UseRegister(value));
540 }
541
IsContiguousMask32(uint32_t value,int * mb,int * me)542 static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
543 int mask_width = base::bits::CountPopulation32(value);
544 int mask_msb = base::bits::CountLeadingZeros32(value);
545 int mask_lsb = base::bits::CountTrailingZeros32(value);
546 if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
547 return false;
548 *mb = mask_lsb + mask_width - 1;
549 *me = mask_lsb;
550 return true;
551 }
552
553 #if V8_TARGET_ARCH_S390X
IsContiguousMask64(uint64_t value,int * mb,int * me)554 static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
555 int mask_width = base::bits::CountPopulation64(value);
556 int mask_msb = base::bits::CountLeadingZeros64(value);
557 int mask_lsb = base::bits::CountTrailingZeros64(value);
558 if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
559 return false;
560 *mb = mask_lsb + mask_width - 1;
561 *me = mask_lsb;
562 return true;
563 }
564 #endif
565
// Tries to fuse (x << sh) & mask or (x >>> sh) & mask into a single
// rotate-then-mask instruction when the mask is a contiguous bit run;
// otherwise emits a plain 32-bit AND.
void InstructionSelector::VisitWord32And(Node* node) {
  S390OperandGenerator g(this);
  Int32BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
        CanCover(node, left)) {
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 31)) {
        left = mleft.left().node();
        sh = mleft.right().Value();
        if (m.left().IsWord32Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 31 - sh) mb = 31 - sh;
          // A logical right shift by sh is a left rotate by (32 - sh) once
          // the rotated-in bits are masked off.
          sh = (32 - sh) & 0x1f;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    // Only emit the fused form if a contiguous mask remains after adjustment.
    if (mb >= me) {
      Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
           g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
           g.TempImmediate(me));
      return;
    }
  }
  VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kUint32Imm);
}
599
600 #if V8_TARGET_ARCH_S390X
// 64-bit analogue of VisitWord32And: fuses shift+AND-with-contiguous-mask
// into one of the rotate-then-clear instructions when the mask shape permits;
// otherwise emits a plain 64-bit AND.
void InstructionSelector::VisitWord64And(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
        CanCover(node, left)) {
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        left = mleft.left().node();
        sh = mleft.right().Value();
        if (m.left().IsWord64Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 63 - sh) mb = 63 - sh;
          // Express the logical right shift as an equivalent left rotate.
          sh = (64 - sh) & 0x3f;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      // Pick the rotate-and-clear variant that matches the mask shape:
      // mask reaching bit 0 clears the left, mask reaching bit 63 clears the
      // right, and a shl-produced interior mask uses the general form.
      bool match = false;
      ArchOpcode opcode;
      int mask;
      if (me == 0) {
        match = true;
        opcode = kS390_RotLeftAndClearLeft64;
        mask = mb;
      } else if (mb == 63) {
        match = true;
        opcode = kS390_RotLeftAndClearRight64;
        mask = me;
      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
        match = true;
        opcode = kS390_RotLeftAndClear64;
        mask = mb;
      }
      if (match) {
        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
             g.TempImmediate(sh), g.TempImmediate(mask));
        return;
      }
    }
  }
  VisitBinop<Int64BinopMatcher>(this, node, kS390_And64, kUint32Imm);
}
651 #endif
652
VisitWord32Or(Node * node)653 void InstructionSelector::VisitWord32Or(Node* node) {
654 Int32BinopMatcher m(node);
655 VisitBinop<Int32BinopMatcher>(this, node, kS390_Or32, kUint32Imm);
656 }
657
658 #if V8_TARGET_ARCH_S390X
VisitWord64Or(Node * node)659 void InstructionSelector::VisitWord64Or(Node* node) {
660 Int64BinopMatcher m(node);
661 VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64, kUint32Imm);
662 }
663 #endif
664
VisitWord32Xor(Node * node)665 void InstructionSelector::VisitWord32Xor(Node* node) {
666 S390OperandGenerator g(this);
667 Int32BinopMatcher m(node);
668 if (m.right().Is(-1)) {
669 Emit(kS390_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
670 } else {
671 VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor32, kUint32Imm);
672 }
673 }
674
675 #if V8_TARGET_ARCH_S390X
VisitWord64Xor(Node * node)676 void InstructionSelector::VisitWord64Xor(Node* node) {
677 S390OperandGenerator g(this);
678 Int64BinopMatcher m(node);
679 if (m.right().Is(-1)) {
680 Emit(kS390_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
681 } else {
682 VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64, kUint32Imm);
683 }
684 }
685 #endif
686
// Tries to fuse (x & mask) << sh into a single rotate-then-mask instruction
// when the shifted mask is still contiguous; otherwise emits a plain shift.
void InstructionSelector::VisitWord32Shl(Node* node) {
  S390OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    Int32BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
}
709
710 #if V8_TARGET_ARCH_S390X
// 64-bit analogue of VisitWord32Shl: fuses (x & mask) << sh into a
// rotate-then-clear instruction when the shifted mask shape permits.
void InstructionSelector::VisitWord64Shl(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(mbrandy): eliminate left sign extension if right >= 32
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        // Pick the rotate-and-clear variant matching the mask shape (see
        // VisitWord64And for the same selection logic).
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          match = true;
          opcode = kS390_RotLeftAndClear64;
          mask = mb;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kS390_ShiftLeft64, node, kShift64Imm);
}
752 #endif
753
// Tries to fuse (x & mask) >>> sh into a rotate-then-mask instruction; the
// right shift is re-expressed as a left rotate by (32 - sh).
void InstructionSelector::VisitWord32Shr(Node* node) {
  S390OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    Int32BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 31 - sh) mb = 31 - sh;
      // Express the logical right shift as an equivalent left rotate.
      sh = (32 - sh) & 0x1f;
      if (mb >= me) {
        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
}
777
778 #if V8_TARGET_ARCH_S390X
// 64-bit analogue of VisitWord32Shr: fuses (x & mask) >>> sh into a
// rotate-then-clear instruction when the shifted mask touches an end.
void InstructionSelector::VisitWord64Shr(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      // Express the logical right shift as an equivalent left rotate.
      sh = (64 - sh) & 0x3f;
      if (mb >= me) {
        // Only the clear-left/clear-right variants apply here; the general
        // interior form is valid only for left shifts (see VisitWord64Shl).
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kS390_ShiftRight64, node, kShift64Imm);
}
816 #endif
817
VisitWord32Sar(Node * node)818 void InstructionSelector::VisitWord32Sar(Node* node) {
819 S390OperandGenerator g(this);
820 Int32BinopMatcher m(node);
821 // Replace with sign extension for (x << K) >> K where K is 16 or 24.
822 if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
823 Int32BinopMatcher mleft(m.left().node());
824 if (mleft.right().Is(16) && m.right().Is(16)) {
825 Emit(kS390_ExtendSignWord16, g.DefineAsRegister(node),
826 g.UseRegister(mleft.left().node()));
827 return;
828 } else if (mleft.right().Is(24) && m.right().Is(24)) {
829 Emit(kS390_ExtendSignWord8, g.DefineAsRegister(node),
830 g.UseRegister(mleft.left().node()));
831 return;
832 }
833 }
834 VisitRRO(this, kS390_ShiftRightArith32, node, kShift32Imm);
835 }
836
837 #if !V8_TARGET_ARCH_S390X
VisitPairBinop(InstructionSelector * selector,InstructionCode opcode,InstructionCode opcode2,Node * node)838 void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
839 InstructionCode opcode2, Node* node) {
840 S390OperandGenerator g(selector);
841
842 Node* projection1 = NodeProperties::FindProjection(node, 1);
843 if (projection1) {
844 // We use UseUniqueRegister here to avoid register sharing with the output
845 // registers.
846 InstructionOperand inputs[] = {
847 g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
848 g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
849
850 InstructionOperand outputs[] = {
851 g.DefineAsRegister(node),
852 g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
853
854 selector->Emit(opcode, 2, outputs, 4, inputs);
855 } else {
856 // The high word of the result is not used, so we emit the standard 32 bit
857 // instruction.
858 selector->Emit(opcode2, g.DefineSameAsFirst(node),
859 g.UseRegister(node->InputAt(0)),
860 g.UseRegister(node->InputAt(2)));
861 }
862 }
863
// Double-word addition: pair opcode when the high word is used, plain 32-bit
// add otherwise.
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitPairBinop(this, kS390_AddPair, kS390_Add32, node);
}
867
// Double-word subtraction: pair opcode when the high word is used, plain
// 32-bit subtract otherwise.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitPairBinop(this, kS390_SubPair, kS390_Sub32, node);
}
871
VisitInt32PairMul(Node * node)872 void InstructionSelector::VisitInt32PairMul(Node* node) {
873 S390OperandGenerator g(this);
874 Node* projection1 = NodeProperties::FindProjection(node, 1);
875 if (projection1) {
876 InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
877 g.UseUniqueRegister(node->InputAt(1)),
878 g.UseUniqueRegister(node->InputAt(2)),
879 g.UseUniqueRegister(node->InputAt(3))};
880
881 InstructionOperand outputs[] = {
882 g.DefineAsRegister(node),
883 g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
884
885 Emit(kS390_MulPair, 2, outputs, 4, inputs);
886 } else {
887 // The high word of the result is not used, so we emit the standard 32 bit
888 // instruction.
889 Emit(kS390_Mul32, g.DefineSameAsFirst(node),
890 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
891 }
892 }
893
namespace {
// Shared routine for multiple shift operations.
// Emits a pair (two 32-bit word) shift. The shift amount is encoded as an
// immediate when it is a known constant.
void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
                    Node* node) {
  S390OperandGenerator g(selector);
  // We use g.UseUniqueRegister here to guarantee that there is
  // no register aliasing of input registers with output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    // Constant shift amount: encode directly as an immediate operand.
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  // Inputs: low word, high word, shift amount.
  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  // The low word is always a real output; the high word is a real output only
  // when its projection is used, otherwise a scratch register is allocated.
  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
}  // namespace
930
// Paired (two 32-bit word) shifts; operand constraints live in
// VisitPairShift.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitPairShift(this, kS390_ShiftLeftPair, node);
}

void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitPairShift(this, kS390_ShiftRightPair, node);
}

void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitPairShift(this, kS390_ShiftRightArithPair, node);
}
942 #endif
943
#if V8_TARGET_ARCH_S390X
// 64-bit arithmetic shift right; immediate shift amounts in [0, 64).
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kS390_ShiftRightArith64, node, kShift64Imm);
}
#endif

// 32-bit rotate right; immediate rotate amounts in [0, 32).
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kS390_RotRight32, node, kShift32Imm);
}

#if V8_TARGET_ARCH_S390X
// 64-bit rotate right; immediate rotate amounts in [0, 64).
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kS390_RotRight64, node, kShift64Imm);
}
#endif
959
// Count leading zeros, 32-bit.
void InstructionSelector::VisitWord32Clz(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_Cntlz32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

#if V8_TARGET_ARCH_S390X
// Count leading zeros, 64-bit.
void InstructionSelector::VisitWord64Clz(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_Cntlz64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
#endif

// Population count (number of set bits), 32-bit.
void InstructionSelector::VisitWord32Popcnt(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_Popcnt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

#if V8_TARGET_ARCH_S390X
// Population count (number of set bits), 64-bit.
void InstructionSelector::VisitWord64Popcnt(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_Popcnt64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
#endif

// Count-trailing-zeros is not lowered on this target; reaching these
// visitors indicates a bug upstream.
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif
993
// Bit-reversal is not lowered on this target; reaching these visitors
// indicates a bug upstream.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif

// Byte-swap a 64-bit value (register-to-register load-reverse form).
void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1005
// Byte-swap a 32-bit value. When the input is a 32-bit load that only this
// node uses (CanCover), the swap is fused into a byte-reversing load;
// otherwise the register-to-register form is emitted.
void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kWord32) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      Emit(kS390_LoadReverse32 | AddressingModeField::encode(kMode_MRR),
           // TODO(john.yan): one of the base and offset can be imm.
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset));
      return;
    }
  }
  Emit(kS390_LoadReverse32RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1024
// 32-bit add; the immediate mode allows a signed 32-bit constant operand.
void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm);
}

#if V8_TARGET_ARCH_S390X
// 64-bit add; immediates remain restricted to the signed 32-bit range.
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm);
}
#endif
1034
VisitInt32Sub(Node * node)1035 void InstructionSelector::VisitInt32Sub(Node* node) {
1036 S390OperandGenerator g(this);
1037 Int32BinopMatcher m(node);
1038 if (m.left().Is(0)) {
1039 Emit(kS390_Neg32, g.DefineAsRegister(node),
1040 g.UseRegister(m.right().node()));
1041 } else {
1042 VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate);
1043 }
1044 }
1045
#if V8_TARGET_ARCH_S390X
// 64-bit subtraction. (0 - x) is strength-reduced to a negate instruction;
// everything else goes through the generic binop path with the
// negate-immediate mode.
void InstructionSelector::VisitInt64Sub(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kS390_Neg64, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
    return;
  }
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate);
}
#endif
1058
namespace {

// Forward declaration; the definition lives with the other compare helpers
// further down in this file.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont);
// Emits a 32x32 multiply that also produces the high 32 bits, then detects
// signed overflow: the product fits in 32 bits iff the high word equals the
// sign-extension (arithmetic shift by 31) of the low word. The comparison
// result is routed into the flags continuation.
void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
                              FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand high32_operand = g.TempRegister();
  InstructionOperand temp_operand = g.TempRegister();
  {
    // result = low 32 bits of the product, high32 = high 32 bits.
    InstructionOperand outputs[] = {result_operand, high32_operand};
    InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
                                   g.UseRegister(m.right().node())};
    selector->Emit(kS390_Mul32WithHigh32, 2, outputs, 2, inputs);
  }
  {
    // temp = result >> 31 (arithmetic), i.e. the high word a non-overflowing
    // product would have.
    InstructionOperand shift_31 = g.UseImmediate(31);
    InstructionOperand outputs[] = {temp_operand};
    InstructionOperand inputs[] = {result_operand, shift_31};
    selector->Emit(kS390_ShiftRightArith32, 1, outputs, 2, inputs);
  }

  VisitCompare(selector, kS390_Cmp32, high32_operand, temp_operand, cont);
}
1086
VisitMul(InstructionSelector * selector,Node * node,ArchOpcode opcode)1087 void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
1088 S390OperandGenerator g(selector);
1089 Int32BinopMatcher m(node);
1090 Node* left = m.left().node();
1091 Node* right = m.right().node();
1092 if (g.CanBeImmediate(right, kInt32Imm)) {
1093 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
1094 g.UseImmediate(right));
1095 } else {
1096 if (g.CanBeBetterLeftOperand(right)) {
1097 std::swap(left, right);
1098 }
1099 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
1100 g.Use(right));
1101 }
1102 }
1103
1104 } // namespace
1105
VisitInt32MulWithOverflow(Node * node)1106 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
1107 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1108 FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
1109 return EmitInt32MulWithOverflow(this, node, &cont);
1110 }
1111 VisitMul(this, node, kS390_Mul32);
1112 // FlagsContinuation cont;
1113 // EmitInt32MulWithOverflow(this, node, &cont);
1114 }
1115
VisitInt32Mul(Node * node)1116 void InstructionSelector::VisitInt32Mul(Node* node) {
1117 S390OperandGenerator g(this);
1118 Int32BinopMatcher m(node);
1119 Node* left = m.left().node();
1120 Node* right = m.right().node();
1121 if (g.CanBeImmediate(right, kInt32Imm) &&
1122 base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
1123 int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
1124 Emit(kS390_ShiftLeft32, g.DefineSameAsFirst(node), g.UseRegister(left),
1125 g.UseImmediate(power));
1126 return;
1127 }
1128 VisitMul(this, node, kS390_Mul32);
1129 }
1130
#if V8_TARGET_ARCH_S390X
// 64-bit multiply. Multiplication by a constant power of two is
// strength-reduced to a left shift; otherwise defer to the shared VisitMul.
void InstructionSelector::VisitInt64Mul(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right, kInt32Imm)) {
    // Fetch the immediate once instead of re-querying it for each use.
    int64_t value = g.GetImmediate(right);
    if (base::bits::IsPowerOfTwo64(value)) {
      int power = 63 - base::bits::CountLeadingZeros64(value);
      Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
           g.UseImmediate(power));
      return;
    }
  }
  VisitMul(this, node, kS390_Mul64);
}
#endif
1147
VisitInt32MulHigh(Node * node)1148 void InstructionSelector::VisitInt32MulHigh(Node* node) {
1149 S390OperandGenerator g(this);
1150 Int32BinopMatcher m(node);
1151 Node* left = m.left().node();
1152 Node* right = m.right().node();
1153 if (g.CanBeBetterLeftOperand(right)) {
1154 std::swap(left, right);
1155 }
1156 Emit(kS390_MulHigh32, g.DefineAsRegister(node), g.UseRegister(left),
1157 g.Use(right));
1158 }
1159
VisitUint32MulHigh(Node * node)1160 void InstructionSelector::VisitUint32MulHigh(Node* node) {
1161 S390OperandGenerator g(this);
1162 Int32BinopMatcher m(node);
1163 Node* left = m.left().node();
1164 Node* right = m.right().node();
1165 if (g.CanBeBetterLeftOperand(right)) {
1166 std::swap(left, right);
1167 }
1168 Emit(kS390_MulHighU32, g.DefineAsRegister(node), g.UseRegister(left),
1169 g.Use(right));
1170 }
1171
// Signed/unsigned division and modulus, 32- and 64-bit, all lowered through
// the shared three-register helper.
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kS390_Div32, node);
}

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kS390_Div64, node);
}
#endif

void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, kS390_DivU32, node);
}

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kS390_DivU64, node);
}
#endif

void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kS390_Mod32, node);
}

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kS390_Mod64, node);
}
#endif

void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, kS390_ModU32, node);
}

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kS390_ModU64, node);
}
#endif
1211
// Scalar numeric conversions, each lowered through the shared RR helper.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kS390_Float32ToDouble, node);
}

void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kS390_Int32ToFloat32, node);
}

void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kS390_Uint32ToFloat32, node);
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kS390_Int32ToDouble, node);
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kS390_Uint32ToDouble, node);
}

void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kS390_DoubleToInt32, node);
}

void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kS390_DoubleToUint32, node);
}

// Uses the same machine opcode as ChangeFloat64ToUint32.
void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kS390_DoubleToUint32, node);
}
1243
#if V8_TARGET_ARCH_S390X
// Checked float-to-64-bit-integer truncations (64-bit targets only); each
// produces a success projection via the shared try-truncate helper.
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
}

void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kS390_ExtendSignWord32, node);
}

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kS390_Uint32ToUint64, node);
}
#endif
1271
// Narrowing/truncating conversions, lowered through the shared RR helper.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kS390_DoubleToFloat32, node);
}

// Uses the architecture-independent JS truncation opcode.
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kS390_DoubleToInt32, node);
}

void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kS390_Float32ToInt32, node);
}

void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kS390_Float32ToUint32, node);
}
1291
#if V8_TARGET_ARCH_S390X
// 64-bit-only conversions, lowered through the shared RR helper.
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kS390_Int64ToInt32, node);
}

void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kS390_Int64ToFloat32, node);
}

void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kS390_Int64ToDouble, node);
}

void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kS390_Uint64ToFloat32, node);
}

void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kS390_Uint64ToDouble, node);
}
#endif
1314
// Reinterpreting bitcasts between float and integer registers.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kS390_BitcastFloat32ToInt32, node);
}

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kS390_BitcastDoubleToInt64, node);
}
#endif

void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kS390_BitcastInt32ToFloat32, node);
}

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kS390_BitcastInt64ToDouble, node);
}
#endif
1334
// Floating-point arithmetic, lowered through the shared RRR/RR helpers.
void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kS390_AddFloat, node);
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  // TODO(mbrandy): detect multiply-add
  VisitRRR(this, kS390_AddDouble, node);
}

void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kS390_SubFloat, node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  // TODO(mbrandy): detect multiply-subtract
  VisitRRR(this, kS390_SubDouble, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kS390_MulFloat, node);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  // TODO(mbrandy): detect negate
  VisitRRR(this, kS390_MulDouble, node);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kS390_DivFloat, node);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kS390_DivDouble, node);
}

// Float64 modulus is emitted as a call (MarkAsCall) with fixed double
// argument/result registers d1 and d2.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
       g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitRRR(this, kS390_MaxFloat, node);
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRR(this, kS390_MaxDouble, node);
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kS390_Float64SilenceNaN, node);
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRR(this, kS390_MinFloat, node);
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRR(this, kS390_MinDouble, node);
}

void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kS390_AbsFloat, node);
}

void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kS390_AbsDouble, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kS390_SqrtFloat, node);
}
1408
// IEEE-754 math operations are emitted as calls (MarkAsCall) with fixed
// double argument/result registers d1 (and d2 for binops).
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
       g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}
1423
// Square root, rounding, and negation, lowered through the shared RR helper.
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kS390_SqrtDouble, node);
}

void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kS390_FloorFloat, node);
}

void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kS390_FloorDouble, node);
}

void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kS390_CeilFloat, node);
}

void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kS390_CeilDouble, node);
}

void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kS390_TruncateFloat, node);
}

void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kS390_TruncateDouble, node);
}

void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRR(this, kS390_RoundDouble, node);
}

// Round-to-nearest-even is not lowered on this target; reaching these
// visitors indicates a bug upstream.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kS390_NegFloat, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kS390_NegDouble, node);
}
1471
// Add/subtract with an overflow projection: when the overflow output is
// used, materialize the overflow flag into its projection; otherwise emit
// the plain binop with an empty continuation.
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm, &cont);
}

void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32,
                                         kInt32Imm_Negate, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate,
                                &cont);
}

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm, &cont);
}

void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
                                         kInt32Imm_Negate, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate,
                                &cont);
}
#endif
1515
CompareLogical(FlagsContinuation * cont)1516 static bool CompareLogical(FlagsContinuation* cont) {
1517 switch (cont->condition()) {
1518 case kUnsignedLessThan:
1519 case kUnsignedGreaterThanOrEqual:
1520 case kUnsignedLessThanOrEqual:
1521 case kUnsignedGreaterThan:
1522 return true;
1523 default:
1524 return false;
1525 }
1526 UNREACHABLE();
1527 return false;
1528 }
1529
namespace {

// Shared routine for multiple compare operations.
// Dispatches on the continuation kind: branch, deoptimize, or materialize
// the comparison result into a register.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    // Compare and branch to the continuation's true/false blocks.
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    // Compare and deoptimize with the attached frame state.
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                             cont->frame_state());
  } else {
    // Materialize the boolean result.
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}
1549
// Shared routine for multiple word compare operations.
// Tries to use an immediate operand on either side; an immediate on the left
// forces the operands to swap, which in turn requires commuting the
// condition for non-commutative comparisons.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative, ImmediateMode immediate_mode) {
  S390OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}
1571
// 32-bit compare: unsigned conditions use an unsigned immediate mode,
// signed conditions a signed one.
void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kInt32Imm);
  VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
}
1577
#if V8_TARGET_ARCH_S390X
// 64-bit compare.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  // NOTE(review): the original ternary selected kUint32Imm in *both* arms
  // ("CompareLogical(cont) ? kUint32Imm : kUint32Imm"), making the
  // CompareLogical() call pointless; simplified to a constant with identical
  // behavior. If signed 64-bit compares were meant to use a signed immediate
  // mode (as VisitWord32Compare does), that would be a behavior change and
  // needs a separate, deliberate fix.
  ImmediateMode mode = kUint32Imm;
  VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
}
#endif
1585
// Shared routine for multiple float32 compare operations.
// Both operands are register operands here; no immediate forms are matched.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(selector, kS390_CmpFloat, g.UseRegister(left),
               g.UseRegister(right), cont);
}

// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(selector, kS390_CmpDouble, g.UseRegister(left),
               g.UseRegister(right), cont);
}
1605
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, InstructionCode opcode,
                          FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal &&
         selector->CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  // When this user covers the value node, fold the comparison directly into
  // the flags continuation instead of materializing a boolean first.
  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
#if V8_TARGET_ARCH_S390X
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
#endif
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    selector, node, kS390_Add32, kInt32Imm, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    selector, node, kS390_Sub32, kInt32Imm_Negate, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kNotEqual);
                return EmitInt32MulWithOverflow(selector, node, cont);
#if V8_TARGET_ARCH_S390X
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(
                    selector, node, kS390_Add64, kInt32Imm, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(
                    selector, node, kS390_Sub64, kInt32Imm_Negate, cont);
#endif
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
                                kUint32Imm);
      // TODO(mbrandy): Handle?
      // case IrOpcode::kInt32Add:
      // case IrOpcode::kWord32Or:
      // case IrOpcode::kWord32Xor:
      // case IrOpcode::kWord32Sar:
      // case IrOpcode::kWord32Shl:
      // case IrOpcode::kWord32Shr:
      // case IrOpcode::kWord32Ror:
#if V8_TARGET_ARCH_S390X
      case IrOpcode::kInt64Sub:
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kWord64And:
        return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
                                kUint32Imm);
      // TODO(mbrandy): Handle?
      // case IrOpcode::kInt64Add:
      // case IrOpcode::kWord64Or:
      // case IrOpcode::kWord64Xor:
      // case IrOpcode::kWord64Sar:
      // case IrOpcode::kWord64Shl:
      // case IrOpcode::kWord64Shr:
      // case IrOpcode::kWord64Ror:
#endif
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  S390OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
               cont);
}
1751
// Compares the 32-bit |value| against zero and dispatches the result through
// |cont| (branch, deoptimize, or materialized boolean).
void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
                            Node* value, FlagsContinuation* cont) {
  VisitWordCompareZero(selector, user, value, kS390_Cmp32, cont);
}
1756
1757 #if V8_TARGET_ARCH_S390X
// 64-bit counterpart of VisitWord32CompareZero: compares |value| against zero
// with a 64-bit compare and dispatches through |cont|.
void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
                            Node* value, FlagsContinuation* cont) {
  VisitWordCompareZero(selector, user, value, kS390_Cmp64, cont);
}
1762 #endif
1763
1764 } // namespace
1765
// Selects instructions for a Branch node. The condition input is compared
// against zero with kNotEqual, so a nonzero condition takes |tbranch|.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
}
1771
// Deoptimizes when the condition (input 0) is nonzero. Input 1 is forwarded
// to the continuation (presumably the frame state — confirm with callers).
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
1777
// Deoptimizes when the condition (input 0) is zero — the inverse of
// VisitDeoptimizeIf (kEqual instead of kNotEqual).
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
1783
VisitSwitch(Node * node,const SwitchInfo & sw)1784 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1785 S390OperandGenerator g(this);
1786 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1787
1788 // Emit either ArchTableSwitch or ArchLookupSwitch.
1789 size_t table_space_cost = 4 + sw.value_range;
1790 size_t table_time_cost = 3;
1791 size_t lookup_space_cost = 3 + 2 * sw.case_count;
1792 size_t lookup_time_cost = sw.case_count;
1793 if (sw.case_count > 0 &&
1794 table_space_cost + 3 * table_time_cost <=
1795 lookup_space_cost + 3 * lookup_time_cost &&
1796 sw.min_value > std::numeric_limits<int32_t>::min()) {
1797 InstructionOperand index_operand = value_operand;
1798 if (sw.min_value) {
1799 index_operand = g.TempRegister();
1800 Emit(kS390_Sub32, index_operand, value_operand,
1801 g.TempImmediate(sw.min_value));
1802 }
1803 // Generate a table lookup.
1804 return EmitTableSwitch(sw, index_operand);
1805 }
1806
1807 // Generate a sequence of conditional jumps.
1808 return EmitLookupSwitch(sw, value_operand);
1809 }
1810
VisitWord32Equal(Node * const node)1811 void InstructionSelector::VisitWord32Equal(Node* const node) {
1812 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1813 Int32BinopMatcher m(node);
1814 if (m.right().Is(0)) {
1815 return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
1816 }
1817 VisitWord32Compare(this, node, &cont);
1818 }
1819
// Int32 signed less-than: materializes a boolean from the compare flags.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
1824
// Int32 signed less-than-or-equal: materializes a boolean from the flags.
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
1830
// Uint32 (unsigned) less-than: materializes a boolean from the flags.
void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
1835
// Uint32 (unsigned) less-than-or-equal: materializes a boolean from the flags.
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
1841
1842 #if V8_TARGET_ARCH_S390X
VisitWord64Equal(Node * const node)1843 void InstructionSelector::VisitWord64Equal(Node* const node) {
1844 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1845 Int64BinopMatcher m(node);
1846 if (m.right().Is(0)) {
1847 return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
1848 }
1849 VisitWord64Compare(this, node, &cont);
1850 }
1851
// Int64 signed less-than: materializes a boolean from the compare flags.
void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}
1856
// Int64 signed less-than-or-equal: materializes a boolean from the flags.
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
1862
// Uint64 (unsigned) less-than: materializes a boolean from the flags.
void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}
1867
// Uint64 (unsigned) less-than-or-equal: materializes a boolean from the flags.
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
1873 #endif
1874
// Float32 equality: materializes a boolean from the compare flags.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
1879
// Float32 less-than. Uses the unsigned condition code, consistent with the
// float cases in the combined-compare switch above.
void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}
1884
// Float32 less-than-or-equal, using the unsigned condition code.
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
1890
// Float64 equality: materializes a boolean from the compare flags.
void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
1895
// Float64 less-than, using the unsigned condition code.
void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}
1900
// Float64 less-than-or-equal, using the unsigned condition code.
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
1906
EmitPrepareArguments(ZoneVector<PushParameter> * arguments,const CallDescriptor * descriptor,Node * node)1907 void InstructionSelector::EmitPrepareArguments(
1908 ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
1909 Node* node) {
1910 S390OperandGenerator g(this);
1911
1912 // Prepare for C function call.
1913 if (descriptor->IsCFunctionCall()) {
1914 Emit(kArchPrepareCallCFunction |
1915 MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
1916 0, nullptr, 0, nullptr);
1917
1918 // Poke any stack arguments.
1919 int slot = kStackFrameExtraParamSlot;
1920 for (PushParameter input : (*arguments)) {
1921 Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
1922 g.TempImmediate(slot));
1923 ++slot;
1924 }
1925 } else {
1926 // Push any stack arguments.
1927 int num_slots = static_cast<int>(descriptor->StackParameterCount());
1928 int slot = 0;
1929 for (PushParameter input : (*arguments)) {
1930 if (slot == 0) {
1931 DCHECK(input.node());
1932 Emit(kS390_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
1933 g.TempImmediate(num_slots));
1934 } else {
1935 // Skip any alignment holes in pushed nodes.
1936 if (input.node()) {
1937 Emit(kS390_StoreToStackSlot, g.NoOutput(),
1938 g.UseRegister(input.node()), g.TempImmediate(slot));
1939 }
1940 }
1941 ++slot;
1942 }
1943 }
1944 }
1945
// Tail-call target addresses are never encoded as immediates on this port.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1947
// Reserves three temporary registers for tail calls from JSFunction frames.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1949
// Extracts the low 32-bit word of a double into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_DoubleExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1955
// Extracts the high 32-bit word of a double into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_DoubleExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1961
VisitFloat64InsertLowWord32(Node * node)1962 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1963 S390OperandGenerator g(this);
1964 Node* left = node->InputAt(0);
1965 Node* right = node->InputAt(1);
1966 if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
1967 CanCover(node, left)) {
1968 left = left->InputAt(1);
1969 Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
1970 g.UseRegister(right));
1971 return;
1972 }
1973 Emit(kS390_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
1974 g.UseRegister(left), g.UseRegister(right));
1975 }
1976
VisitFloat64InsertHighWord32(Node * node)1977 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1978 S390OperandGenerator g(this);
1979 Node* left = node->InputAt(0);
1980 Node* right = node->InputAt(1);
1981 if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
1982 CanCover(node, left)) {
1983 left = left->InputAt(1);
1984 Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
1985 g.UseRegister(left));
1986 return;
1987 }
1988 Emit(kS390_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
1989 g.UseRegister(left), g.UseRegister(right));
1990 }
1991
VisitAtomicLoad(Node * node)1992 void InstructionSelector::VisitAtomicLoad(Node* node) {
1993 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1994 S390OperandGenerator g(this);
1995 Node* base = node->InputAt(0);
1996 Node* index = node->InputAt(1);
1997 ArchOpcode opcode = kArchNop;
1998 switch (load_rep.representation()) {
1999 case MachineRepresentation::kWord8:
2000 opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
2001 break;
2002 case MachineRepresentation::kWord16:
2003 opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
2004 break;
2005 case MachineRepresentation::kWord32:
2006 opcode = kAtomicLoadWord32;
2007 break;
2008 default:
2009 UNREACHABLE();
2010 return;
2011 }
2012 Emit(opcode | AddressingModeField::encode(kMode_MRR),
2013 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
2014 }
2015
VisitAtomicStore(Node * node)2016 void InstructionSelector::VisitAtomicStore(Node* node) {
2017 MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2018 S390OperandGenerator g(this);
2019 Node* base = node->InputAt(0);
2020 Node* index = node->InputAt(1);
2021 Node* value = node->InputAt(2);
2022 ArchOpcode opcode = kArchNop;
2023 switch (rep) {
2024 case MachineRepresentation::kWord8:
2025 opcode = kAtomicStoreWord8;
2026 break;
2027 case MachineRepresentation::kWord16:
2028 opcode = kAtomicStoreWord16;
2029 break;
2030 case MachineRepresentation::kWord32:
2031 opcode = kAtomicStoreWord32;
2032 break;
2033 default:
2034 UNREACHABLE();
2035 return;
2036 }
2037
2038 InstructionOperand inputs[4];
2039 size_t input_count = 0;
2040 inputs[input_count++] = g.UseUniqueRegister(value);
2041 inputs[input_count++] = g.UseUniqueRegister(base);
2042 inputs[input_count++] = g.UseUniqueRegister(index);
2043 Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
2044 inputs);
2045 }
2046
// static
// Advertises the machine operators this port selects natively: float
// rounding modes, 32/64-bit popcount, and 32/64-bit byte reversal.
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kWord32ReverseBytes |
         MachineOperatorBuilder::kWord64ReverseBytes |
         MachineOperatorBuilder::kWord64Popcnt;
}
2062
// static
// This port reports full unaligned access support, so the compiler never
// emits alignment fix-up code for loads or stores.
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}
2069
2070 } // namespace compiler
2071 } // namespace internal
2072 } // namespace v8
2073