// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/s390/macro-assembler-s390.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

#define kScratchReg ip

// Adds S390-specific methods to convert InstructionOperands.
class S390OperandConverter final : public InstructionOperandConverter {
 public:
  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  size_t OutputCount() { return instr_->OutputCount(); }

  bool Is64BitOperand(int index) {
    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
           MachineRepresentation::kWord64;
  }

  bool Is32BitOperand(int index) {
    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
           MachineRepresentation::kWord32;
  }

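  // Returns true when the instruction's flags condition is one of the
  // unsigned comparisons, i.e. when the logical (unsigned) compare
  // instruction must be emitted instead of the signed one.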
  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
    return false;
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    if (mode) *mode = AddressingModeField::decode(instr_->opcode());
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MR:
        *first_index += 1;
        return MemOperand(InputRegister(index + 0), 0);
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
      case kMode_MRRI:
        *first_index += 3;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          InputInt32(index + 2));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand MemoryOperand(AddressingMode* mode = NULL,
                           size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }

  MemOperand InputStackSlot(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }
};

static inline bool HasRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsRegister();
}

static inline bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}

static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsStackSlot();
}

namespace {

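// Slow paths for checked loads: when the bounds check fails, these emit the
// out-of-line code that materializes the default result (quiet NaN for
// floats, zero for integers) before jumping back to the exit label.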
class OutOfLineLoadNAN32 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};

class OutOfLineLoadNAN64 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};

class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ LoadImmP(result_, Operand::Zero()); }

 private:
  Register const result_;
};

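// Out-of-line portion of the write barrier: skips Smis and values on
// uninteresting pages, then calls the RecordWriteStub, saving and restoring
// r14 around the call when the instruction executes without a frame.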
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Push(r14);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    if (offset_.is(no_reg)) {
      __ AddP(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ AddP(scratch1_, object_, offset_);
    }
    __ CallStub(&stub);
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Pop(r14);
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
};

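// Maps a platform-independent FlagsCondition to the s390 condition code.
// Overflow conditions are only meaningful for the add/subtract opcodes that
// actually set the overflow bit.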
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
    case kUnsignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
    case kUnsignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
    case kUnsignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
    case kUnsignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow checked for AddP/SubP only.
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
          return overflow;
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
          return nooverflow;
        default:
          break;
      }
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

}  // namespace

#define ASSEMBLE_FLOAT_UNOP(asm_instr)                                \
  do {                                                                \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
  } while (0)

#define ASSEMBLE_FLOAT_BINOP(asm_instr)                              \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1));                          \
  } while (0)

#define ASSEMBLE_BINOP(asm_instr)                          \
  do {                                                     \
    if (HasRegisterInput(instr, 1)) {                      \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
                   i.InputRegister(1));                    \
    } else if (HasImmediateInput(instr, 1)) {              \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
                   i.InputImmediate(1));                   \
    } else {                                               \
      UNIMPLEMENTED();                                     \
    }                                                      \
  } while (0)

#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                 \
  do {                                                          \
    if (HasRegisterInput(instr, 1)) {                           \
      if (i.CompareLogical()) {                                 \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                  \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));   \
      }                                                         \
    } else {                                                    \
      if (i.CompareLogical()) {                                 \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                  \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));  \
      }                                                         \
    }                                                           \
  } while (0)

#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                             \
  do {                                                                \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
  } while (0)

// The divide instruction dr implicitly uses the register pair r0 and r1:
// R0:R1 = R1 / divisor, with R0 receiving the remainder.
// Copy the remainder to the output register.
#define ASSEMBLE_MODULO(div_instr, shift_instr) \
  do {                                          \
    __ LoadRR(r0, i.InputRegister(0));          \
    __ shift_instr(r0, Operand(32));            \
    __ div_instr(r0, i.InputRegister(1));       \
    __ ltr(i.OutputRegister(), r0);             \
  } while (0)

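// Floating-point modulo has no machine instruction; call out to the C
// library's fmod through the mod_two_doubles_operation external reference.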
#define ASSEMBLE_FLOAT_MODULO()                                               \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                            i.InputDoubleRegister(1));                        \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
                     0, 2);                                                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 1);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 2);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)

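// Computes the JavaScript Math.max semantics for doubles: NaN if either
// operand is NaN, and +0 preferred over -0 (the sign bits are effectively
// ANDed by adding the two signed zeros together).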
#define ASSEMBLE_DOUBLE_MAX()                                          \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cdbr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ bge(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cdbr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For max we want logical-and of sign bit: (L + R) */             \
    __ ldr(result_reg, left_reg);                                      \
    __ adbr(result_reg, right_reg);                                    \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cdbr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (!right_reg.is(result_reg)) {                                   \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (!left_reg.is(result_reg)) {                                    \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

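// Computes the JavaScript Math.min semantics for doubles: NaN if either
// operand is NaN, and -0 preferred over +0 (the sign bits are effectively
// ORed via -(-L + -R) on the signed-zero path).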
#define ASSEMBLE_DOUBLE_MIN()                                          \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cdbr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ ble(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cdbr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For min we want logical-or of sign bit: -(-L + -R) */           \
    __ lcdbr(left_reg, left_reg);                                      \
    __ ldr(result_reg, left_reg);                                      \
    if (left_reg.is(right_reg)) {                                      \
      __ adbr(result_reg, right_reg);                                  \
    } else {                                                           \
      __ sdbr(result_reg, right_reg);                                  \
    }                                                                  \
    __ lcdbr(result_reg, result_reg);                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cdbr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (!right_reg.is(result_reg)) {                                   \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (!left_reg.is(result_reg)) {                                    \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

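// Single-precision variants of the Max/Min sequences above.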
#define ASSEMBLE_FLOAT_MAX()                                           \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cebr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ bge(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cebr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For max we want logical-and of sign bit: (L + R) */             \
    __ ldr(result_reg, left_reg);                                      \
    __ aebr(result_reg, right_reg);                                    \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cebr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (!right_reg.is(result_reg)) {                                   \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (!left_reg.is(result_reg)) {                                    \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

#define ASSEMBLE_FLOAT_MIN()                                           \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cebr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ ble(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cebr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For min we want logical-or of sign bit: -(-L + -R) */           \
    __ lcebr(left_reg, left_reg);                                      \
    __ ldr(result_reg, left_reg);                                      \
    if (left_reg.is(right_reg)) {                                      \
      __ aebr(result_reg, right_reg);                                  \
    } else {                                                           \
      __ sebr(result_reg, right_reg);                                  \
    }                                                                  \
    __ lcebr(result_reg, result_reg);                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cebr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (!right_reg.is(result_reg)) {                                   \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (!left_reg.is(result_reg)) {                                    \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

// Only the MRI addressing mode is available for these instructions.
#define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    __ asm_instr(result, operand);                    \
  } while (0)

#define ASSEMBLE_LOAD_INTEGER(asm_instr)         \
  do {                                           \
    Register result = i.OutputRegister();        \
    AddressingMode mode = kMode_None;            \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand);               \
  } while (0)

#define ASSEMBLE_STORE_FLOAT32()                         \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreFloat32(value, operand);                     \
  } while (0)

#define ASSEMBLE_STORE_DOUBLE()                          \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreDouble(value, operand);                      \
  } while (0)

#define ASSEMBLE_STORE_INTEGER(asm_instr)                \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    __ asm_instr(value, operand);                        \
  } while (0)

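// Checked loads and stores compare the offset register against the length
// operand and branch to the out-of-line default path (or past the store)
// when the access is out of bounds.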
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width)              \
  do {                                                             \
    DoubleRegister result = i.OutputDoubleRegister();              \
    size_t index = 0;                                              \
    AddressingMode mode = kMode_None;                              \
    MemOperand operand = i.MemoryOperand(&mode, index);            \
    Register offset = operand.rb();                                \
    if (HasRegisterInput(instr, 2)) {                              \
      __ CmpLogical32(offset, i.InputRegister(2));                 \
    } else {                                                       \
      __ CmpLogical32(offset, i.InputImmediate(2));                \
    }                                                              \
    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
    __ bge(ool->entry());                                          \
    __ CleanUInt32(offset);                                        \
    __ asm_instr(result, operand);                                 \
    __ bind(ool->exit());                                          \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
  do {                                                       \
    Register result = i.OutputRegister();                    \
    size_t index = 0;                                        \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, index);      \
    Register offset = operand.rb();                          \
    if (HasRegisterInput(instr, 2)) {                        \
      __ CmpLogical32(offset, i.InputRegister(2));           \
    } else {                                                 \
      __ CmpLogical32(offset, i.InputImmediate(2));          \
    }                                                        \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ bge(ool->entry());                                    \
    __ CleanUInt32(offset);                                  \
    __ asm_instr(result, operand);                           \
    __ bind(ool->exit());                                    \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ CmpLogical32(offset, i.InputRegister(2));      \
    } else {                                            \
      __ CmpLogical32(offset, i.InputImmediate(2));     \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    __ CleanUInt32(offset);                             \
    __ StoreFloat32(value, operand);                    \
    __ bind(&done);                                     \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode);                         \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ CmpLogical32(offset, i.InputRegister(2));      \
    } else {                                            \
      __ CmpLogical32(offset, i.InputImmediate(2));     \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    __ CleanUInt32(offset);                             \
    __ StoreDouble(value, operand);                     \
    __ bind(&done);                                     \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)       \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ CmpLogical32(offset, i.InputRegister(2));      \
    } else {                                            \
      __ CmpLogical32(offset, i.InputImmediate(2));     \
    }                                                   \
    __ bge(&done);                                      \
    Register value = i.InputRegister(3);                \
    __ CleanUInt32(offset);                             \
    __ asm_instr(value, operand);                       \
    __ bind(&done);                                     \
  } while (0)

void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ RestoreFrameStateForTailCall();
  }
  frame_access_state()->SetFrameAccessToSP();
}

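// For tail calls from a JS function, drops an arguments adaptor frame if one
// sits on top of the current frame, so the callee sees the caller's actual
// argument count.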
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ LoadP(caller_args_count_reg,
           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}

namespace {

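// Registers queued for pushing are flushed in batches of up to three so the
// MacroAssembler can combine them into a single multi-register push.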
void FlushPendingPushRegisters(MacroAssembler* masm,
                               FrameAccessState* frame_access_state,
                               ZoneVector<Register>* pending_pushes) {
  switch (pending_pushes->size()) {
    case 0:
      break;
    case 1:
      masm->Push((*pending_pushes)[0]);
      break;
    case 2:
      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
      break;
    case 3:
      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                 (*pending_pushes)[2]);
      break;
    default:
      UNREACHABLE();
      break;
  }
  frame_access_state->IncreaseSPDelta(pending_pushes->size());
  pending_pushes->resize(0);
}

void AddPendingPushRegister(MacroAssembler* masm,
                            FrameAccessState* frame_access_state,
                            ZoneVector<Register>* pending_pushes,
                            Register reg) {
  pending_pushes->push_back(reg);
  if (pending_pushes->size() == 3 || reg.is(ip)) {
    FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
  }
}
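
// Adjusts sp so that the next pushed slot lands at new_slot_above_sp,
// flushing any batched pushes first; shrinking is optional so the
// before-gap pass can avoid clobbering slots it is about to write.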
void AdjustStackPointerForTailCall(
    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
    ZoneVector<Register>* pending_pushes = nullptr,
    bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(masm, state, pending_pushes);
    }
    masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(masm, state, pending_pushes);
    }
    masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace


void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
  ZoneVector<MoveOperands*> pushes(zone());
  GetPushCompatibleMoves(instr, flags, &pushes);

  if (!pushes.empty() &&
      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
       first_unused_stack_slot)) {
    S390OperandConverter g(this, instr);
    ZoneVector<Register> pending_pushes(zone());
    for (auto move : pushes) {
      LocationOperand destination_location(
          LocationOperand::cast(move->destination()));
      InstructionOperand source(move->source());
      AdjustStackPointerForTailCall(
          masm(), frame_access_state(),
          destination_location.index() - pending_pushes.size(),
          &pending_pushes);
      if (source.IsStackSlot()) {
        LocationOperand source_location(LocationOperand::cast(source));
        __ LoadP(ip, g.SlotToMemOperand(source_location.index()));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else if (source.IsRegister()) {
        LocationOperand source_location(LocationOperand::cast(source));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               source_location.GetRegister());
      } else if (source.IsImmediate()) {
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else {
        // Pushes of non-scalar data types are not supported.
        UNIMPLEMENTED();
      }
      move->Eliminate();
    }
    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
  }
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot, nullptr, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot);
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  S390OperandConverter i(this, instr);
  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());

  switch (opcode) {
    case kArchComment: {
      Address comment_string = i.InputExternalReference(0).address();
      __ RecordComment(reinterpret_cast<const char*>(comment_string));
      break;
    }
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasRegisterInput(instr, 0)) {
        __ AddP(ip, i.InputRegister(0),
                Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      } else {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      if (opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (HasRegisterInput(instr, 0)) {
        __ AddP(ip, i.InputRegister(0),
                Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Jump(ip);
      } else {
        // We cannot use the constant pool to load the target since
        // we've already restored the caller's frame.
        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      __ Jump(i.InputRegister(0));
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ LoadP(kScratchReg,
                 FieldMemOperand(func, JSFunction::kContextOffset));
        __ CmpP(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ LoadP(kScratchReg,
                 FieldMemOperand(func, JSFunction::kContextOffset));
        __ CmpP(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                       i.TempRegister(0), i.TempRegister(1),
                                       i.TempRegister(2));
      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(ip);
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchDebugBreak:
      __ stop("kArchDebugBreak");
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      CodeGenResult result = AssembleDeoptimizerCall(
          deopt_state_id, bailout_type, current_source_position_);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointer:
      __ LoadRR(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ LoadRR(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ LoadRR(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      // TODO(mbrandy): move slow call to stub out of line.
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
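    // Performs the store, then jumps to the out-of-line write barrier when
    // the object lives on a page whose outgoing pointers are being tracked.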
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      OutOfLineRecordWrite* ool;

      AddressingMode addressing_mode =
          AddressingModeField::decode(instr->opcode());
      if (addressing_mode == kMode_MRI) {
        int32_t offset = i.InputInt32(1);
        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
                                                scratch0, scratch1, mode);
        __ StoreP(value, MemOperand(object, offset));
      } else {
        DCHECK_EQ(kMode_MRR, addressing_mode);
        Register offset(i.InputRegister(1));
        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
                                                scratch0, scratch1, mode);
        __ StoreP(value, MemOperand(object, offset));
      }
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
              Operand(offset.offset()));
      break;
    }
    case kS390_And32:
      ASSEMBLE_BINOP(And);
      break;
    case kS390_And64:
      ASSEMBLE_BINOP(AndP);
      break;
    case kS390_Or32:
      ASSEMBLE_BINOP(Or);
      break;
    case kS390_Or64:
      ASSEMBLE_BINOP(OrP);
      break;
    case kS390_Xor32:
      ASSEMBLE_BINOP(Xor);
      break;
    case kS390_Xor64:
      ASSEMBLE_BINOP(XorP);
      break;
    case kS390_ShiftLeft32:
      if (HasRegisterInput(instr, 1)) {
        if (i.OutputRegister().is(i.InputRegister(1)) &&
            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
          __ LoadRR(kScratchReg, i.InputRegister(1));
          __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
        } else {
          ASSEMBLE_BINOP(ShiftLeft);
        }
      } else {
        ASSEMBLE_BINOP(ShiftLeft);
      }
      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ShiftLeft64:
      ASSEMBLE_BINOP(sllg);
      break;
#endif
    case kS390_ShiftRight32:
      if (HasRegisterInput(instr, 1)) {
        if (i.OutputRegister().is(i.InputRegister(1)) &&
            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
          __ LoadRR(kScratchReg, i.InputRegister(1));
          __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
        } else {
          ASSEMBLE_BINOP(ShiftRight);
        }
      } else {
        ASSEMBLE_BINOP(ShiftRight);
      }
      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ShiftRight64:
      ASSEMBLE_BINOP(srlg);
      break;
#endif
    case kS390_ShiftRightArith32:
      if (HasRegisterInput(instr, 1)) {
        if (i.OutputRegister().is(i.InputRegister(1)) &&
            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
          __ LoadRR(kScratchReg, i.InputRegister(1));
          __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
                             kScratchReg);
        } else {
          ASSEMBLE_BINOP(ShiftRightArith);
        }
      } else {
        ASSEMBLE_BINOP(ShiftRightArith);
      }
      __ LoadlW(i.OutputRegister(), i.OutputRegister());
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ShiftRightArith64:
      ASSEMBLE_BINOP(srag);
      break;
#endif
#if !V8_TARGET_ARCH_S390X
    case kS390_AddPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
                      i.InputRegister(2));
      __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
                               i.InputRegister(3));
      break;
    case kS390_SubPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
                      i.InputRegister(2));
      __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
                                i.InputRegister(3));
      break;
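    // Assembles each 64-bit operand from its high/low word pair in r0/r1,
    // multiplies them with a single 64-bit msgr, then splits the product
    // back into low and high output words.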
    case kS390_MulPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ sllg(r0, i.InputRegister(1), Operand(32));
      __ sllg(r1, i.InputRegister(3), Operand(32));
      __ lr(r0, i.InputRegister(0));
      __ lr(r1, i.InputRegister(2));
      __ msgr(r1, r0);
      __ lr(i.OutputRegister(0), r1);
      __ srag(i.OutputRegister(1), r1, Operand(32));
      break;
    case kS390_ShiftLeftPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftLeftPair(i.OutputRegister(0), second_output,
                         i.InputRegister(0), i.InputRegister(1),
                         i.InputInt32(2));
      } else {
        __ ShiftLeftPair(i.OutputRegister(0), second_output,
                         i.InputRegister(0), i.InputRegister(1), kScratchReg,
                         i.InputRegister(2));
      }
      break;
    }
    case kS390_ShiftRightPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftRightPair(i.OutputRegister(0), second_output,
                          i.InputRegister(0), i.InputRegister(1),
                          i.InputInt32(2));
      } else {
        __ ShiftRightPair(i.OutputRegister(0), second_output,
                          i.InputRegister(0), i.InputRegister(1), kScratchReg,
                          i.InputRegister(2));
      }
      break;
    }
    case kS390_ShiftRightArithPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftRightArithPair(i.OutputRegister(0), second_output,
                               i.InputRegister(0), i.InputRegister(1),
                               i.InputInt32(2));
      } else {
        __ ShiftRightArithPair(i.OutputRegister(0), second_output,
                               i.InputRegister(0), i.InputRegister(1),
                               kScratchReg, i.InputRegister(2));
      }
      break;
    }
#endif
    case kS390_RotRight32:
      if (HasRegisterInput(instr, 1)) {
        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
        __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
      } else {
        __ rll(i.OutputRegister(), i.InputRegister(0),
               Operand(32 - i.InputInt32(1)));
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_RotRight64:
      if (HasRegisterInput(instr, 1)) {
        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
        __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
      } else {
        __ rllg(i.OutputRegister(), i.InputRegister(0),
                Operand(64 - i.InputInt32(1)));
      }
      break;
#endif
    case kS390_Not32:
      __ Not32(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_Not64:
      __ Not64(i.OutputRegister(), i.InputRegister(0));
      break;
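    // With the general-instructions extension, rotate-then-insert (risbg)
    // performs the rotate and mask in one instruction; otherwise the mask is
    // applied with a shift-left/shift-right pair that clears the unwanted
    // bits on each side.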
    case kS390_RotLeftAndMask32:
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63 - i.InputInt32(3);
        int startBit = 63 - i.InputInt32(2);
        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
                 Operand(endBit), Operand::Zero(), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBitLeft = 63 - i.InputInt32(2);
        int clearBitRight = i.InputInt32(3);
        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
        __ srlg(i.OutputRegister(), i.OutputRegister(),
                Operand((clearBitLeft + clearBitRight)));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_RotLeftAndClear64:
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63 - shiftAmount;
        int startBit = 63 - i.InputInt32(2);
        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
                 Operand(endBit), Operand(shiftAmount), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBit = 63 - i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ srlg(i.OutputRegister(), i.OutputRegister(),
                Operand(clearBit + shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
      }
      break;
    case kS390_RotLeftAndClearLeft64:
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63;
        int startBit = 63 - i.InputInt32(2);
        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
                 Operand(endBit), Operand(shiftAmount), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBit = 63 - i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
      }
      break;
    case kS390_RotLeftAndClearRight64:
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63 - i.InputInt32(2);
        int startBit = 0;
        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
                 Operand(endBit), Operand(shiftAmount), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBit = i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
      }
      break;
#endif
    case kS390_Add32:
      ASSEMBLE_BINOP(Add32);
      __ LoadW(i.OutputRegister(), i.OutputRegister());
      break;
    case kS390_Add64:
      ASSEMBLE_BINOP(AddP);
      break;
    case kS390_AddFloat:
      // Ensure we don't clobber right/InputReg(1).
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(aebr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_AddDouble:
      // Ensure we don't clobber right/InputReg(1).
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(adbr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_Sub32:
      ASSEMBLE_BINOP(Sub32);
      __ LoadW(i.OutputRegister(), i.OutputRegister());
      break;
    case kS390_Sub64:
      ASSEMBLE_BINOP(SubP);
      break;
    case kS390_SubFloat:
      // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        }
        __ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_SubDouble:
      // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        }
        __ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_Mul32:
      if (HasRegisterInput(instr, 1)) {
        __ Mul32(i.InputRegister(0), i.InputRegister(1));
      } else if (HasImmediateInput(instr, 1)) {
        __ Mul32(i.InputRegister(0), i.InputImmediate(1));
      } else if (HasStackSlotInput(instr, 1)) {
#ifdef V8_TARGET_ARCH_S390X
        // Avoid the endianness issue here:
        //   stg r1, 0(fp)
        //   ...
        //   msy r2, 0(fp) <-- This will read the upper 32 bits.
        __ lg(kScratchReg, i.InputStackSlot(1));
        __ Mul32(i.InputRegister(0), kScratchReg);
#else
        __ Mul32(i.InputRegister(0), i.InputStackSlot(1));
#endif
      } else {
        UNIMPLEMENTED();
      }
      break;
    case kS390_Mul64:
      if (HasRegisterInput(instr, 1)) {
        __ Mul64(i.InputRegister(0), i.InputRegister(1));
      } else if (HasImmediateInput(instr, 1)) {
        __ Mul64(i.InputRegister(0), i.InputImmediate(1));
      } else if (HasStackSlotInput(instr, 1)) {
        __ Mul64(i.InputRegister(0), i.InputStackSlot(1));
      } else {
        UNIMPLEMENTED();
      }
      break;
    case kS390_MulHigh32:
      __ LoadRR(r1, i.InputRegister(0));
      if (HasRegisterInput(instr, 1)) {
        __ mr_z(r0, i.InputRegister(1));
      } else if (HasStackSlotInput(instr, 1)) {
#ifdef V8_TARGET_ARCH_S390X
        // Avoid the endianness issue here:
        //   stg r1, 0(fp)
        //   ...
        //   mfy r2, 0(fp) <-- This will read the upper 32 bits.
        __ lg(kScratchReg, i.InputStackSlot(1));
        __ mr_z(r0, kScratchReg);
#else
        __ mfy(r0, i.InputStackSlot(1));
#endif
      } else {
        UNIMPLEMENTED();
      }
      __ LoadW(i.OutputRegister(), r0);
      break;
    case kS390_Mul32WithHigh32:
      __ LoadRR(r1, i.InputRegister(0));
      __ mr_z(r0, i.InputRegister(1));
      __ LoadW(i.OutputRegister(0), r1);  // low
      __ LoadW(i.OutputRegister(1), r0);  // high
      break;
    case kS390_MulHighU32:
      __ LoadRR(r1, i.InputRegister(0));
      if (HasRegisterInput(instr, 1)) {
        __ mlr(r0, i.InputRegister(1));
      } else if (HasStackSlotInput(instr, 1)) {
#ifdef V8_TARGET_ARCH_S390X
        // Avoid the endianness issue here:
        //   stg r1, 0(fp)
        //   ...
        //   mfy r2, 0(fp) <-- This will read the upper 32 bits.
        __ lg(kScratchReg, i.InputStackSlot(1));
        __ mlr(r0, kScratchReg);
#else
        __ ml(r0, i.InputStackSlot(1));
#endif
      } else {
        UNIMPLEMENTED();
      }
      __ LoadlW(i.OutputRegister(), r0);
      break;
    case kS390_MulFloat:
      // Ensure we don't clobber right.
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(meebr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_MulDouble:
      // Ensure we don't clobber right.
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(mdbr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Div64:
      __ LoadRR(r1, i.InputRegister(0));
      __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
      break;
#endif
    case kS390_Div32:
      __ LoadRR(r0, i.InputRegister(0));
      __ srda(r0, Operand(32));
      __ dr(r0, i.InputRegister(1));
      __ LoadAndTestP_ExtendSrc(i.OutputRegister(),
                                r1);  // Copy R1: Quotient to output
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_DivU64:
      __ LoadRR(r1, i.InputRegister(0));
      __ LoadImmP(r0, Operand::Zero());
      __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
      break;
#endif
    case kS390_DivU32:
      __ LoadRR(r0, i.InputRegister(0));
      __ srdl(r0, Operand(32));
      __ dlr(r0, i.InputRegister(1));     // R0:R1: Dividend
      __ LoadlW(i.OutputRegister(), r1);  // Copy R1: Quotient to output
      __ LoadAndTestP_ExtendSrc(r1, r1);
      break;

    case kS390_DivFloat:
      // OutputDoubleRegister() = InputDoubleRegister(0) / InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_DivDouble:
      // OutputDoubleRegister() = InputDoubleRegister(0) / InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
1505 case kS390_Mod32:
1506 ASSEMBLE_MODULO(dr, srda);
1507 break;
1508 case kS390_ModU32:
1509 ASSEMBLE_MODULO(dlr, srdl);
1510 break;
1511 #if V8_TARGET_ARCH_S390X
1512 case kS390_Mod64:
1513 __ LoadRR(r1, i.InputRegister(0));
1514 __ dsgr(r0, i.InputRegister(1)); // R1: Dividend
1515 __ ltgr(i.OutputRegister(), r0); // Copy R0: Remainder to output
1516 break;
1517 case kS390_ModU64:
1518 __ LoadRR(r1, i.InputRegister(0));
1519 __ LoadImmP(r0, Operand::Zero());
1520 __ dlgr(r0, i.InputRegister(1)); // R0:R1: Dividend
1521 __ ltgr(i.OutputRegister(), r0); // Copy R0: Remainder to output
1522 break;
1523 #endif
1524 case kS390_AbsFloat:
1525 __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1526 break;
1527 case kS390_SqrtFloat:
1528 ASSEMBLE_FLOAT_UNOP(sqebr);
1529 break;
1530 case kS390_FloorFloat:
1531 __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1532 v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
1533 break;
1534 case kS390_CeilFloat:
1535 __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1536 v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
1537 break;
1538 case kS390_TruncateFloat:
1539 __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1540 v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
1541 break;
1542 // Double operations
1543 case kS390_ModDouble:
1544 ASSEMBLE_FLOAT_MODULO();
1545 break;
1546 case kIeee754Float64Acos:
1547 ASSEMBLE_IEEE754_UNOP(acos);
1548 break;
1549 case kIeee754Float64Acosh:
1550 ASSEMBLE_IEEE754_UNOP(acosh);
1551 break;
1552 case kIeee754Float64Asin:
1553 ASSEMBLE_IEEE754_UNOP(asin);
1554 break;
1555 case kIeee754Float64Asinh:
1556 ASSEMBLE_IEEE754_UNOP(asinh);
1557 break;
1558 case kIeee754Float64Atanh:
1559 ASSEMBLE_IEEE754_UNOP(atanh);
1560 break;
1561 case kIeee754Float64Atan:
1562 ASSEMBLE_IEEE754_UNOP(atan);
1563 break;
1564 case kIeee754Float64Atan2:
1565 ASSEMBLE_IEEE754_BINOP(atan2);
1566 break;
1567 case kIeee754Float64Tan:
1568 ASSEMBLE_IEEE754_UNOP(tan);
1569 break;
1570 case kIeee754Float64Tanh:
1571 ASSEMBLE_IEEE754_UNOP(tanh);
1572 break;
1573 case kIeee754Float64Cbrt:
1574 ASSEMBLE_IEEE754_UNOP(cbrt);
1575 break;
1576 case kIeee754Float64Sin:
1577 ASSEMBLE_IEEE754_UNOP(sin);
1578 break;
1579 case kIeee754Float64Sinh:
1580 ASSEMBLE_IEEE754_UNOP(sinh);
1581 break;
1582 case kIeee754Float64Cos:
1583 ASSEMBLE_IEEE754_UNOP(cos);
1584 break;
1585 case kIeee754Float64Cosh:
1586 ASSEMBLE_IEEE754_UNOP(cosh);
1587 break;
1588 case kIeee754Float64Exp:
1589 ASSEMBLE_IEEE754_UNOP(exp);
1590 break;
1591 case kIeee754Float64Expm1:
1592 ASSEMBLE_IEEE754_UNOP(expm1);
1593 break;
1594 case kIeee754Float64Log:
1595 ASSEMBLE_IEEE754_UNOP(log);
1596 break;
1597 case kIeee754Float64Log1p:
1598 ASSEMBLE_IEEE754_UNOP(log1p);
1599 break;
1600 case kIeee754Float64Log2:
1601 ASSEMBLE_IEEE754_UNOP(log2);
1602 break;
1603 case kIeee754Float64Log10:
1604 ASSEMBLE_IEEE754_UNOP(log10);
1605 break;
1606 case kIeee754Float64Pow: {
1607 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
1608 __ CallStub(&stub);
1609 __ Move(d1, d3);
1610 break;
1611 }
    case kS390_Neg32:
      __ lcr(i.OutputRegister(), i.InputRegister(0));
      __ LoadW(i.OutputRegister(), i.OutputRegister());
      break;
    case kS390_Neg64:
      __ lcgr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_MaxFloat:
      ASSEMBLE_FLOAT_MAX();
      break;
    case kS390_MaxDouble:
      ASSEMBLE_DOUBLE_MAX();
      break;
    case kS390_MinFloat:
      ASSEMBLE_FLOAT_MIN();
      break;
    case kS390_MinDouble:
      ASSEMBLE_DOUBLE_MIN();
      break;
    case kS390_AbsDouble:
      __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_SqrtDouble:
      ASSEMBLE_FLOAT_UNOP(sqdbr);
      break;
    case kS390_FloorDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
      break;
    case kS390_CeilDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
      break;
    case kS390_TruncateDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
      break;
    case kS390_RoundDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
      break;
    case kS390_NegFloat:
      ASSEMBLE_FLOAT_UNOP(lcebr);
      break;
    case kS390_NegDouble:
      ASSEMBLE_FLOAT_UNOP(lcdbr);
      break;
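    // flogr counts leading zeros of a 64-bit value and writes its result to
    // the even register of an even/odd pair (here r0/r1). For the 32-bit
    // count, the input is first zero-extended, which overstates the result
    // by exactly 32; the final SubP corrects for that.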
    case kS390_Cntlz32: {
      __ llgfr(i.OutputRegister(), i.InputRegister(0));
      __ flogr(r0, i.OutputRegister());
      __ LoadRR(i.OutputRegister(), r0);
      __ SubP(i.OutputRegister(), Operand(32));
    } break;
#if V8_TARGET_ARCH_S390X
    case kS390_Cntlz64: {
      __ flogr(r0, i.InputRegister(0));
      __ LoadRR(i.OutputRegister(), r0);
    } break;
#endif
    case kS390_Popcnt32:
      __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Popcnt64:
      __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
      break;
#endif
    case kS390_Cmp32:
      ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Cmp64:
      ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
      break;
#endif
    case kS390_CmpFloat:
      __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kS390_CmpDouble:
      __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
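    // Tst only needs the condition code: the AND result lands in the scratch
    // r0 and is discarded. For the 32-bit form, LoadAndTestP_ExtendSrc
    // sign-extends the low word so the 64-bit test matches 32-bit semantics.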
    case kS390_Tst32:
      if (HasRegisterInput(instr, 1)) {
        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
      } else {
        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
      }
      __ LoadAndTestP_ExtendSrc(r0, r0);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Tst64:
      if (HasRegisterInput(instr, 1)) {
        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
      } else {
        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
      }
      break;
#endif
    case kS390_Float64SilenceNaN: {
      DoubleRegister value = i.InputDoubleRegister(0);
      DoubleRegister result = i.OutputDoubleRegister();
      __ CanonicalizeNaN(result, value);
      break;
    }
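    // A double push cannot go through the Push macro: the stack pointer is
    // decremented explicitly before the store, and the frame's SP delta is
    // tracked in pointer-size slots (two per double on 32-bit builds).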
    case kS390_Push:
      if (instr->InputAt(0)->IsFPRegister()) {
        __ lay(sp, MemOperand(sp, -kDoubleSize));
        __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kS390_PushFrame: {
      int num_slots = i.InputInt32(1);
      __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
        } else {
          DCHECK(op->representation() == MachineRepresentation::kFloat32);
          __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
        }
      } else {
        __ StoreP(i.InputRegister(0), MemOperand(sp));
      }
      break;
    }
    case kS390_StoreToStackSlot: {
      int slot = i.InputInt32(1);
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreDouble(i.InputDoubleRegister(0),
                         MemOperand(sp, slot * kPointerSize));
        } else {
          DCHECK(op->representation() == MachineRepresentation::kFloat32);
          __ StoreFloat32(i.InputDoubleRegister(0),
                          MemOperand(sp, slot * kPointerSize));
        }
      } else {
        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
      }
      break;
    }
    case kS390_ExtendSignWord8:
#if V8_TARGET_ARCH_S390X
      __ lgbr(i.OutputRegister(), i.InputRegister(0));
#else
      __ lbr(i.OutputRegister(), i.InputRegister(0));
#endif
      break;
    case kS390_ExtendSignWord16:
#if V8_TARGET_ARCH_S390X
      __ lghr(i.OutputRegister(), i.InputRegister(0));
#else
      __ lhr(i.OutputRegister(), i.InputRegister(0));
#endif
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ExtendSignWord32:
      __ lgfr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_Uint32ToUint64:
      // Zero extend.
      __ llgfr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_Int64ToInt32:
      // Sign extend.
      __ lgfr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_Int64ToFloat32:
      __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Int64ToDouble:
      __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Uint64ToFloat32:
      __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
                                     i.OutputDoubleRegister());
      break;
    case kS390_Uint64ToDouble:
      __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
                                      i.OutputDoubleRegister());
      break;
#endif
    case kS390_Int32ToFloat32:
      __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Int32ToDouble:
      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Uint32ToFloat32:
      __ ConvertUnsignedIntToFloat(i.InputRegister(0),
                                   i.OutputDoubleRegister());
      break;
    case kS390_Uint32ToDouble:
      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                    i.OutputDoubleRegister());
      break;
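    // The float-to-integer truncations below funnel through shared
    // conversion helpers; on 32-bit builds an extra scratch register is
    // passed, presumably to hold the high word of the intermediate 64-bit
    // result. When an opcode has a second output, it acts as a success flag:
    // it is left at 0 if the conversion branches on the out-of-range
    // "special case" condition, and set to 1 otherwise.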
    case kS390_DoubleToInt32:
    case kS390_DoubleToUint32:
    case kS390_DoubleToInt64: {
#if V8_TARGET_ARCH_S390X
      bool check_conversion =
          (opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
#endif
      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_S390X
                              kScratchReg,
#endif
                              i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_S390X
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
#endif
      break;
    }
    case kS390_Float32ToInt32: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
                               kScratchDoubleReg, kRoundToZero);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
    case kS390_Float32ToUint32: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
                                       i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
#if V8_TARGET_ARCH_S390X
    case kS390_Float32ToUint64: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
                                       i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
#endif
    case kS390_Float32ToInt64: {
#if V8_TARGET_ARCH_S390X
      bool check_conversion =
          (opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
#endif
      __ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_S390X
                               kScratchReg,
#endif
                               i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_S390X
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
#endif
      break;
    }
#if V8_TARGET_ARCH_S390X
    case kS390_DoubleToUint64: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
                                      i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
#endif
    case kS390_DoubleToFloat32:
      __ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_Float32ToDouble:
      __ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_DoubleExtractLowWord32:
      __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
      __ llgfr(i.OutputRegister(), i.OutputRegister());
      break;
    case kS390_DoubleExtractHighWord32:
      __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
      __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
      break;
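    // lgdr/ldgr move raw bits between an FPR and a GPR. Note that lr writes
    // only the low 32 bits of its destination, so the word that is not being
    // replaced survives in the scratch register across the round trip.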
    case kS390_DoubleInsertLowWord32:
      __ lgdr(kScratchReg, i.OutputDoubleRegister());
      __ lr(kScratchReg, i.InputRegister(1));
      __ ldgr(i.OutputDoubleRegister(), kScratchReg);
      break;
    case kS390_DoubleInsertHighWord32:
      __ sllg(kScratchReg, i.InputRegister(1), Operand(32));
      __ lgdr(r0, i.OutputDoubleRegister());
      __ lr(kScratchReg, r0);
      __ ldgr(i.OutputDoubleRegister(), kScratchReg);
      break;
    case kS390_DoubleConstruct:
      __ sllg(kScratchReg, i.InputRegister(0), Operand(32));
      __ lr(kScratchReg, i.InputRegister(1));

      // Bitwise convert from GPR to FPR.
      __ ldgr(i.OutputDoubleRegister(), kScratchReg);
      break;
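    // LoadlB is a logical (zero-extending) byte load, so the signed-load
    // opcode issues an explicit sign-extension afterwards.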
    case kS390_LoadWordS8:
      ASSEMBLE_LOAD_INTEGER(LoadlB);
#if V8_TARGET_ARCH_S390X
      __ lgbr(i.OutputRegister(), i.OutputRegister());
#else
      __ lbr(i.OutputRegister(), i.OutputRegister());
#endif
      break;
    case kS390_BitcastFloat32ToInt32:
      __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_BitcastInt32ToFloat32:
      __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_BitcastDoubleToInt64:
      __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_BitcastInt64ToDouble:
      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
#endif
    case kS390_LoadWordU8:
      ASSEMBLE_LOAD_INTEGER(LoadlB);
      break;
    case kS390_LoadWordU16:
      ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
      break;
    case kS390_LoadWordS16:
      ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
      break;
    case kS390_LoadWordU32:
      ASSEMBLE_LOAD_INTEGER(LoadlW);
      break;
    case kS390_LoadWordS32:
      ASSEMBLE_LOAD_INTEGER(LoadW);
      break;
    case kS390_LoadReverse16:
      ASSEMBLE_LOAD_INTEGER(lrvh);
      break;
    case kS390_LoadReverse32:
      ASSEMBLE_LOAD_INTEGER(lrv);
      break;
    case kS390_LoadReverse64:
      ASSEMBLE_LOAD_INTEGER(lrvg);
      break;
    case kS390_LoadReverse16RR:
      __ lrvr(i.OutputRegister(), i.InputRegister(0));
      __ rll(i.OutputRegister(), i.OutputRegister(), Operand(16));
      break;
    case kS390_LoadReverse32RR:
      __ lrvr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_LoadReverse64RR:
      __ lrvgr(i.OutputRegister(), i.InputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_LoadWord64:
      ASSEMBLE_LOAD_INTEGER(lg);
      break;
#endif
    case kS390_LoadFloat32:
      ASSEMBLE_LOAD_FLOAT(LoadFloat32);
      break;
    case kS390_LoadDouble:
      ASSEMBLE_LOAD_FLOAT(LoadDouble);
      break;
    case kS390_StoreWord8:
      ASSEMBLE_STORE_INTEGER(StoreByte);
      break;
    case kS390_StoreWord16:
      ASSEMBLE_STORE_INTEGER(StoreHalfWord);
      break;
    case kS390_StoreWord32:
      ASSEMBLE_STORE_INTEGER(StoreW);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_StoreWord64:
      ASSEMBLE_STORE_INTEGER(StoreP);
      break;
#endif
    case kS390_StoreReverse16:
      ASSEMBLE_STORE_INTEGER(strvh);
      break;
    case kS390_StoreReverse32:
      ASSEMBLE_STORE_INTEGER(strv);
      break;
    case kS390_StoreReverse64:
      ASSEMBLE_STORE_INTEGER(strvg);
      break;
    case kS390_StoreFloat32:
      ASSEMBLE_STORE_FLOAT32();
      break;
    case kS390_StoreDouble:
      ASSEMBLE_STORE_DOUBLE();
      break;
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
#if V8_TARGET_ARCH_S390X
      __ lgbr(i.OutputRegister(), i.OutputRegister());
#else
      __ lbr(i.OutputRegister(), i.OutputRegister());
#endif
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlW);
      break;
    case kCheckedLoadWord64:
#if V8_TARGET_ARCH_S390X
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
#else
      UNREACHABLE();
#endif
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
      break;
    case kCheckedStoreWord64:
#if V8_TARGET_ARCH_S390X
      ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
#else
      UNREACHABLE();
#endif
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT32();
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_DOUBLE();
      break;
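    // On z/Architecture, naturally aligned loads and stores are atomic, so
    // the atomic load/store opcodes assemble to plain memory accesses.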
    case kAtomicLoadInt8:
      __ LoadB(i.OutputRegister(), i.MemoryOperand());
      break;
    case kAtomicLoadUint8:
      __ LoadlB(i.OutputRegister(), i.MemoryOperand());
      break;
    case kAtomicLoadInt16:
      __ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
      break;
    case kAtomicLoadUint16:
      __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
      break;
    case kAtomicLoadWord32:
      __ LoadlW(i.OutputRegister(), i.MemoryOperand());
      break;
    case kAtomicStoreWord8:
      __ StoreByte(i.InputRegister(0), i.MemoryOperand(NULL, 1));
      break;
    case kAtomicStoreWord16:
      __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(NULL, 1));
      break;
    case kAtomicStoreWord32:
      __ StoreW(i.InputRegister(0), i.MemoryOperand(NULL, 1));
      break;
    default:
      UNREACHABLE();
      break;
  }
  return kSuccess;
}  // NOLINT(readability/fn_size)

// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  S390OperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  ArchOpcode op = instr->arch_opcode();
  FlagsCondition condition = branch->condition;

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kS390_CmpDouble) {
    // NaN operands compare unordered. Route them to the false label for
    // lt/le/eq conditions and to the true label for gt/ge/ne, which is the
    // branching behavior the tests expect.
    if (cond == le || cond == eq || cond == lt) {
      __ bunordered(flabel);
    } else if (cond == gt || cond == ne || cond == ge) {
      __ bunordered(tlabel);
    }
  }
  __ b(cond, tlabel);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}

void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}

// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  S390OperandConverter i(this, instr);
  ArchOpcode op = instr->arch_opcode();
  bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);

  // Overflow checked for add/sub only.
  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
         (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
          op == kS390_Sub64));

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cond = FlagsConditionToCondition(condition, op);
  Label done;
  if (check_unordered) {
    __ LoadImmP(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
                                                              : Operand(1));
    __ bunordered(&done);
  }
  __ LoadImmP(reg, Operand::Zero());
  __ LoadImmP(kScratchReg, Operand(1));
  // locr is sufficient since reg's upper 32 bits are guaranteed to be 0.
  __ locr(cond, reg, kScratchReg);
  __ bind(&done);
}

void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  S390OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmp32(input, Operand(i.InputInt32(index + 0)));
    __ beq(GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  S390OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ CmpLogicalP(input, Operand(case_count));
  __ bge(GetLabel(i.InputRpo(1)));
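  // larl materializes the jump table's base address. Each table entry holds
  // an absolute label address (see AssembleJumpTable below), so the target is
  // loaded from table base + input * pointer size and jumped to indirectly.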
  __ larl(kScratchReg, table);
  __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
  __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
  __ Jump(kScratchReg);
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
    SourcePosition pos) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  // TODO(turbofan): We should be able to generate better code by sharing the
  // actual final call site and just bl'ing to it here, similar to what we do
  // in the lithium backend.
  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
  DeoptimizeReason deoptimization_reason =
      GetDeoptimizationReason(deoptimization_id);
  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
  return kSuccess;
}

void CodeGenerator::FinishFrame(Frame* frame) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  const RegList double_saves = descriptor->CalleeSavedFPRegisters();

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    frame->AlignSavedCalleeRegisterSlots();
    DCHECK(kNumCalleeSavedDoubles ==
           base::bits::CountPopulation32(double_saves));
    frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
                                            (kDoubleSize / kPointerSize));
  }
  // Save callee-saved registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // The register save area does not include the fp or constant pool pointer.
    const int num_saves = kNumCalleeSaved - 1;
    DCHECK(num_saves == base::bits::CountPopulation32(saves));
    frame->AllocateSavedCalleeRegisterSlots(num_saves);
  }
}

void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      __ Push(r14, fp);
      __ LoadRR(fp, sp);
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
      if (descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      StackFrame::Type type = info()->GetOutputStackFrameType();
      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
      // efficient initialization of the constant pool pointer register).
      __ StubPrologue(type);
    }
  }

  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly
    // from the unoptimized frame. Thus, all that needs to be done is to
    // allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (shrink_slots > 0) {
    __ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
  }

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    __ MultiPushDoubles(double_saves);
    DCHECK(kNumCalleeSavedDoubles ==
           base::bits::CountPopulation32(double_saves));
  }

  // Save callee-saved registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // The register save area does not include the fp or constant pool pointer.
    __ MultiPush(saves);
  }
}

void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(descriptor->StackParameterCount());

  // Restore registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore double registers.
  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    __ MultiPopDoubles(double_saves);
  }

  S390OperandConverter g(this, nullptr);
  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now, unless they have a
    // variable number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ b(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    __ Drop(g.ToRegister(pop));
  }
  __ Drop(pop_count);
  __ Ret();
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
#if V8_TARGET_ARCH_S390X
          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
#else
          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
#endif
            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ mov(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
            __ mov(dst, Operand(src.ToInt64(), src.rmode()));
          } else {
            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
            __ mov(dst, Operand(src.ToInt64()));
          }
#else
          __ mov(dst, Operand(src.ToInt64()));
#endif  // V8_TARGET_ARCH_S390X
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
          break;
      }
      if (destination->IsStackSlot()) {
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
                                                        : src.ToFloat64();
      if (src.type() == Constant::kFloat32) {
        __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
      } else {
        __ LoadDoubleLiteral(dst, value, kScratchReg);
      }

      if (destination->IsFPStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ StoreDouble(src, g.ToMemOperand(destination));
      } else {
        __ StoreFloat32(src, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(g.ToDoubleRegister(destination), src);
      } else {
        __ LoadFloat32(g.ToDoubleRegister(destination), src);
      }
    } else {
      LocationOperand* op = LocationOperand::cast(source);
      DoubleRegister temp = kScratchDoubleReg;
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(temp, src);
        __ StoreDouble(temp, g.ToMemOperand(destination));
      } else {
        __ LoadFloat32(temp, src);
        __ StoreFloat32(temp, g.ToMemOperand(destination));
      }
    }
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ LoadRR(temp, src);
      __ LoadRR(src, dst);
      __ LoadRR(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ LoadRR(temp, src);
      __ LoadP(src, dst);
      __ StoreP(temp, dst);
    }
#if V8_TARGET_ARCH_S390X
  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
#else
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
#endif
    Register temp_0 = kScratchReg;
    Register temp_1 = r0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ LoadP(temp_0, src);
    __ LoadP(temp_1, dst);
    __ StoreP(temp_0, dst);
    __ StoreP(temp_1, src);
  } else if (source->IsFPRegister()) {
    DoubleRegister temp = kScratchDoubleReg;
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ ldr(temp, src);
      __ ldr(src, dst);
      __ ldr(dst, temp);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ ldr(temp, src);
      __ LoadDouble(src, dst);
      __ StoreDouble(temp, dst);
    }
#if !V8_TARGET_ARCH_S390X
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    DoubleRegister temp_0 = kScratchDoubleReg;
    DoubleRegister temp_1 = d0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    // TODO(joransiu): MVC opportunity
    __ LoadDouble(temp_0, src);
    __ LoadDouble(temp_1, dst);
    __ StoreDouble(temp_0, dst);
    __ StoreDouble(temp_1, src);
#endif
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

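// Jump-table entries are emitted as absolute label addresses, which is what
// the indirect LoadP dispatch in AssembleArchTableSwitch above relies on.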
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ emit_label_addr(targets[index]);
  }
}

void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK_EQ(0, padding_size % 2);
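    // An s390 nop is 2 bytes (the shortest instruction size), so padding is
    // emitted in 2-byte steps; the DCHECK above guarantees this terminates.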
    while (padding_size > 0) {
      __ nop();
      padding_size -= 2;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8