1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <assert.h> // For assert
6 #include <limits.h> // For LONG_MIN, LONG_MAX.
7
8 #if V8_TARGET_ARCH_S390
9
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/debug/debug.h"
15 #include "src/register-configuration.h"
16 #include "src/runtime/runtime.h"
17
18 #include "src/s390/macro-assembler-s390.h"
19
20 namespace v8 {
21 namespace internal {
22
23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
24 CodeObjectRequired create_code_object)
25 : Assembler(arg_isolate, buffer, size),
26 generating_stub_(false),
27 has_frame_(false) {
28 if (create_code_object == CodeObjectRequired::kYes) {
29 code_object_ =
30 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
31 }
32 }
33
34 void MacroAssembler::Jump(Register target) { b(target); }
35
36 void MacroAssembler::JumpToJSEntry(Register target) {
37 Move(ip, target);
38 Jump(ip);
39 }
40
41 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
42 Condition cond, CRegister) {
43 Label skip;
44
45 if (cond != al) b(NegateCondition(cond), &skip);
46
47 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
48
49 mov(ip, Operand(target, rmode));
50 b(ip);
51
52 bind(&skip);
53 }
54
55 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
56 CRegister cr) {
57 DCHECK(!RelocInfo::IsCodeTarget(rmode));
58 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
59 }
60
61 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
62 Condition cond) {
63 DCHECK(RelocInfo::IsCodeTarget(rmode));
64 jump(code, rmode, cond);
65 }
66
67 int MacroAssembler::CallSize(Register target) { return 2; } // BASR
68
69 void MacroAssembler::Call(Register target) {
70 Label start;
71 bind(&start);
72
73 // Branch to target via indirect branch
74 basr(r14, target);
75
76 DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
77 }
78
79 void MacroAssembler::CallJSEntry(Register target) {
80 DCHECK(target.is(ip));
81 Call(target);
82 }
83
84 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
85 Condition cond) {
86 // S390 Assembler::move sequence is IILF / IIHF
87 int size;
88 #if V8_TARGET_ARCH_S390X
89 size = 14; // IILF + IIHF + BASR
90 #else
91 size = 8; // IILF + BASR
92 #endif
93 return size;
94 }
95
96 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
97 RelocInfo::Mode rmode,
98 Condition cond) {
99 // S390 Assembler::move sequence is IILF / IIHF
100 int size;
101 #if V8_TARGET_ARCH_S390X
102 size = 14; // IILF + IIHF + BASR
103 #else
104 size = 8; // IILF + BASR
105 #endif
106 return size;
107 }
108
109 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
110 Condition cond) {
111 DCHECK(cond == al);
112
113 #ifdef DEBUG
114 // Check the expected size before generating code to ensure we assume the same
115 // constant pool availability (e.g., whether constant pool is full or not).
116 int expected_size = CallSize(target, rmode, cond);
117 Label start;
118 bind(&start);
119 #endif
120
121 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
122 basr(r14, ip);
123
124 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
125 }
126
127 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
128 TypeFeedbackId ast_id, Condition cond) {
129 return 6; // BRASL
130 }
131
132 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
133 TypeFeedbackId ast_id, Condition cond) {
134 DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
135
136 #ifdef DEBUG
137 // Check the expected size before generating code to ensure we assume the same
138 // constant pool availability (e.g., whether constant pool is full or not).
139 int expected_size = CallSize(code, rmode, ast_id, cond);
140 Label start;
141 bind(&start);
142 #endif
143 call(code, rmode, ast_id);
144 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
145 }
146
147 void MacroAssembler::Drop(int count) {
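  // Remove |count| pointer-sized slots from the stack, using the shortest
  // addressing form that can encode the byte displacement.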
148 if (count > 0) {
149 int total = count * kPointerSize;
150 if (is_uint12(total)) {
151 la(sp, MemOperand(sp, total));
152 } else if (is_int20(total)) {
153 lay(sp, MemOperand(sp, total));
154 } else {
155 AddP(sp, Operand(total));
156 }
157 }
158 }
159
160 void MacroAssembler::Drop(Register count, Register scratch) {
161 ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
162 AddP(sp, sp, scratch);
163 }
164
165 void MacroAssembler::Call(Label* target) { b(r14, target); }
166
167 void MacroAssembler::Push(Handle<Object> handle) {
168 mov(r0, Operand(handle));
169 push(r0);
170 }
171
172 void MacroAssembler::Move(Register dst, Handle<Object> value) {
173 mov(dst, Operand(value));
174 }
175
176 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
177 if (!dst.is(src)) {
178 LoadRR(dst, src);
179 }
180 }
181
182 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
183 if (!dst.is(src)) {
184 ldr(dst, src);
185 }
186 }
187
188 void MacroAssembler::MultiPush(RegList regs, Register location) {
189 int16_t num_to_push = NumberOfBitsSet(regs);
190 int16_t stack_offset = num_to_push * kPointerSize;
191
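  // Reserve space for all requested registers, then store them from the
  // highest encoding downwards so the lowest-encoded register ends up
  // closest to the new stack pointer.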
192 SubP(location, location, Operand(stack_offset));
193 for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
194 if ((regs & (1 << i)) != 0) {
195 stack_offset -= kPointerSize;
196 StoreP(ToRegister(i), MemOperand(location, stack_offset));
197 }
198 }
199 }
200
201 void MacroAssembler::MultiPop(RegList regs, Register location) {
202 int16_t stack_offset = 0;
203
204 for (int16_t i = 0; i < Register::kNumRegisters; i++) {
205 if ((regs & (1 << i)) != 0) {
206 LoadP(ToRegister(i), MemOperand(location, stack_offset));
207 stack_offset += kPointerSize;
208 }
209 }
210 AddP(location, location, Operand(stack_offset));
211 }
212
213 void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
214 int16_t num_to_push = NumberOfBitsSet(dregs);
215 int16_t stack_offset = num_to_push * kDoubleSize;
216
217 SubP(location, location, Operand(stack_offset));
218 for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
219 if ((dregs & (1 << i)) != 0) {
220 DoubleRegister dreg = DoubleRegister::from_code(i);
221 stack_offset -= kDoubleSize;
222 StoreDouble(dreg, MemOperand(location, stack_offset));
223 }
224 }
225 }
226
227 void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
228 int16_t stack_offset = 0;
229
230 for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
231 if ((dregs & (1 << i)) != 0) {
232 DoubleRegister dreg = DoubleRegister::from_code(i);
233 LoadDouble(dreg, MemOperand(location, stack_offset));
234 stack_offset += kDoubleSize;
235 }
236 }
237 AddP(location, location, Operand(stack_offset));
238 }
239
240 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
241 Condition) {
242 LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
243 }
244
245 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
246 Condition) {
247 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
248 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2));
249 }
250
251 void MacroAssembler::InNewSpace(Register object, Register scratch,
252 Condition cond, Label* branch) {
253 DCHECK(cond == eq || cond == ne);
254 CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
255 }
256
257 void MacroAssembler::RecordWriteField(
258 Register object, int offset, Register value, Register dst,
259 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
260 RememberedSetAction remembered_set_action, SmiCheck smi_check,
261 PointersToHereCheck pointers_to_here_check_for_value) {
262 // First, check if a write barrier is even needed. The tests below
263 // catch stores of Smis.
264 Label done;
265
266 // Skip barrier if writing a smi.
267 if (smi_check == INLINE_SMI_CHECK) {
268 JumpIfSmi(value, &done);
269 }
270
271 // Although the object register is tagged, the offset is relative to the start
272 // of the object, so the offset must be a multiple of kPointerSize.
273 DCHECK(IsAligned(offset, kPointerSize));
274
275 lay(dst, MemOperand(object, offset - kHeapObjectTag));
276 if (emit_debug_code()) {
277 Label ok;
278 AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
279 beq(&ok, Label::kNear);
280 stop("Unaligned cell in write barrier");
281 bind(&ok);
282 }
283
284 RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
285 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
286
287 bind(&done);
288
289 // Clobber clobbered input registers when running with the debug-code flag
290 // turned on to provoke errors.
291 if (emit_debug_code()) {
292 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
293 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
294 }
295 }
296
297 // Will clobber 4 registers: object, map, dst, ip. The
298 // register 'object' contains a heap object pointer.
299 void MacroAssembler::RecordWriteForMap(Register object, Register map,
300 Register dst,
301 LinkRegisterStatus lr_status,
302 SaveFPRegsMode fp_mode) {
303 if (emit_debug_code()) {
304 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
305 CmpP(dst, Operand(isolate()->factory()->meta_map()));
306 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
307 }
308
309 if (!FLAG_incremental_marking) {
310 return;
311 }
312
313 if (emit_debug_code()) {
314 CmpP(map, FieldMemOperand(object, HeapObject::kMapOffset));
315 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
316 }
317
318 Label done;
319
320 // A single check of the map's pages interesting flag suffices, since it is
321 // only set during incremental collection, and then it's also guaranteed that
322 // the from object's page's interesting flag is also set. This optimization
323 // relies on the fact that maps can never be in new space.
324 CheckPageFlag(map,
325 map, // Used as scratch.
326 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
327
328 lay(dst, MemOperand(object, HeapObject::kMapOffset - kHeapObjectTag));
329 if (emit_debug_code()) {
330 Label ok;
331 AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
332 beq(&ok, Label::kNear);
333 stop("Unaligned cell in write barrier");
334 bind(&ok);
335 }
336
337 // Record the actual write.
338 if (lr_status == kLRHasNotBeenSaved) {
339 push(r14);
340 }
341 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
342 fp_mode);
343 CallStub(&stub);
344 if (lr_status == kLRHasNotBeenSaved) {
345 pop(r14);
346 }
347
348 bind(&done);
349
350 // Count number of write barriers in generated code.
351 isolate()->counters()->write_barriers_static()->Increment();
352 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
353
354 // Clobber clobbered registers when running with the debug-code flag
355 // turned on to provoke errors.
356 if (emit_debug_code()) {
357 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
358 mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
359 }
360 }
361
362 // Will clobber 4 registers: object, address, scratch, ip. The
363 // register 'object' contains a heap object pointer. The heap object
364 // tag is shifted away.
365 void MacroAssembler::RecordWrite(
366 Register object, Register address, Register value,
367 LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
368 RememberedSetAction remembered_set_action, SmiCheck smi_check,
369 PointersToHereCheck pointers_to_here_check_for_value) {
370 DCHECK(!object.is(value));
371 if (emit_debug_code()) {
372 CmpP(value, MemOperand(address));
373 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
374 }
375
376 if (remembered_set_action == OMIT_REMEMBERED_SET &&
377 !FLAG_incremental_marking) {
378 return;
379 }
380 // First, check if a write barrier is even needed. The tests below
381 // catch stores of smis and stores into the young generation.
382 Label done;
383
384 if (smi_check == INLINE_SMI_CHECK) {
385 JumpIfSmi(value, &done);
386 }
387
388 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
389 CheckPageFlag(value,
390 value, // Used as scratch.
391 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
392 }
393 CheckPageFlag(object,
394 value, // Used as scratch.
395 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
396
397 // Record the actual write.
398 if (lr_status == kLRHasNotBeenSaved) {
399 push(r14);
400 }
401 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
402 fp_mode);
403 CallStub(&stub);
404 if (lr_status == kLRHasNotBeenSaved) {
405 pop(r14);
406 }
407
408 bind(&done);
409
410 // Count number of write barriers in generated code.
411 isolate()->counters()->write_barriers_static()->Increment();
412 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
413 value);
414
415 // Clobber clobbered registers when running with the debug-code flag
416 // turned on to provoke errors.
417 if (emit_debug_code()) {
418 mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
419 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
420 }
421 }
422
423 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
424 Register code_entry,
425 Register scratch) {
426 const int offset = JSFunction::kCodeEntryOffset;
427
428 // Since a code entry (value) is always in old space, we don't need to update
429 // remembered set. If incremental marking is off, there is nothing for us to
430 // do.
431 if (!FLAG_incremental_marking) return;
432
433 DCHECK(js_function.is(r3));
434 DCHECK(code_entry.is(r6));
435 DCHECK(scratch.is(r7));
436 AssertNotSmi(js_function);
437
438 if (emit_debug_code()) {
439 AddP(scratch, js_function, Operand(offset - kHeapObjectTag));
440 LoadP(ip, MemOperand(scratch));
441 CmpP(ip, code_entry);
442 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
443 }
444
445 // First, check if a write barrier is even needed. The tests below
446 // catch stores of Smis and stores into young gen.
447 Label done;
448
449 CheckPageFlag(code_entry, scratch,
450 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
451 CheckPageFlag(js_function, scratch,
452 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
453
454 const Register dst = scratch;
455 AddP(dst, js_function, Operand(offset - kHeapObjectTag));
456
457 // Save caller-saved registers. js_function and code_entry are in the
458 // caller-saved register list.
459 DCHECK(kJSCallerSaved & js_function.bit());
460 // DCHECK(kJSCallerSaved & code_entry.bit());
461 MultiPush(kJSCallerSaved | code_entry.bit() | r14.bit());
462
463 int argument_count = 3;
464 PrepareCallCFunction(argument_count, code_entry);
465
466 LoadRR(r2, js_function);
467 LoadRR(r3, dst);
468 mov(r4, Operand(ExternalReference::isolate_address(isolate())));
469
470 {
471 AllowExternalCallThatCantCauseGC scope(this);
472 CallCFunction(
473 ExternalReference::incremental_marking_record_write_code_entry_function(
474 isolate()),
475 argument_count);
476 }
477
478 // Restore caller-saved registers (including js_function and code_entry).
479 MultiPop(kJSCallerSaved | code_entry.bit() | r14.bit());
480
481 bind(&done);
482 }
483
484 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
485 Register address, Register scratch,
486 SaveFPRegsMode fp_mode,
487 RememberedSetFinalAction and_then) {
488 Label done;
489 if (emit_debug_code()) {
490 Label ok;
491 JumpIfNotInNewSpace(object, scratch, &ok);
492 stop("Remembered set pointer is in new space");
493 bind(&ok);
494 }
495 // Load store buffer top.
496 ExternalReference store_buffer =
497 ExternalReference::store_buffer_top(isolate());
498 mov(ip, Operand(store_buffer));
499 LoadP(scratch, MemOperand(ip));
500 // Store pointer to buffer and increment buffer top.
501 StoreP(address, MemOperand(scratch));
502 AddP(scratch, Operand(kPointerSize));
503 // Write back new top of buffer.
504 StoreP(scratch, MemOperand(ip));
505 // Call stub on end of buffer.
506 // Check for end of buffer.
507 AndP(scratch, Operand(StoreBuffer::kStoreBufferMask));
508
509 if (and_then == kFallThroughAtEnd) {
510 bne(&done, Label::kNear);
511 } else {
512 DCHECK(and_then == kReturnAtEnd);
513 bne(&done, Label::kNear);
514 }
515 push(r14);
516 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
517 CallStub(&store_buffer_overflow);
518 pop(r14);
519 bind(&done);
520 if (and_then == kReturnAtEnd) {
521 Ret();
522 }
523 }
524
525 void MacroAssembler::PushCommonFrame(Register marker_reg) {
526 int fp_delta = 0;
527 CleanseP(r14);
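  // Push the return address, the caller's fp and (optionally) a frame marker,
  // then point fp at the slot holding the saved fp.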
528 if (marker_reg.is_valid()) {
529 Push(r14, fp, marker_reg);
530 fp_delta = 1;
531 } else {
532 Push(r14, fp);
533 fp_delta = 0;
534 }
535 la(fp, MemOperand(sp, fp_delta * kPointerSize));
536 }
537
538 void MacroAssembler::PopCommonFrame(Register marker_reg) {
539 if (marker_reg.is_valid()) {
540 Pop(r14, fp, marker_reg);
541 } else {
542 Pop(r14, fp);
543 }
544 }
545
546 void MacroAssembler::PushStandardFrame(Register function_reg) {
547 int fp_delta = 0;
548 CleanseP(r14);
549 if (function_reg.is_valid()) {
550 Push(r14, fp, cp, function_reg);
551 fp_delta = 2;
552 } else {
553 Push(r14, fp, cp);
554 fp_delta = 1;
555 }
556 la(fp, MemOperand(sp, fp_delta * kPointerSize));
557 }
558
559 void MacroAssembler::RestoreFrameStateForTailCall() {
560 // if (FLAG_enable_embedded_constant_pool) {
561 // LoadP(kConstantPoolRegister,
562 // MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
563 // set_constant_pool_available(false);
564 // }
565 DCHECK(!FLAG_enable_embedded_constant_pool);
566 LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
567 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
568 }
569
570 const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
571 const int MacroAssembler::kNumSafepointSavedRegisters =
572 Register::kNumAllocatable;
573
574 // Push and pop all registers that can hold pointers.
575 void MacroAssembler::PushSafepointRegisters() {
576 // Safepoints expect a block of kNumSafepointRegisters values on the
577 // stack, so adjust the stack for unsaved registers.
578 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
579 DCHECK(num_unsaved >= 0);
580 if (num_unsaved > 0) {
581 lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
582 }
583 MultiPush(kSafepointSavedRegisters);
584 }
585
586 void MacroAssembler::PopSafepointRegisters() {
587 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
588 MultiPop(kSafepointSavedRegisters);
589 if (num_unsaved > 0) {
590 la(sp, MemOperand(sp, num_unsaved * kPointerSize));
591 }
592 }
593
594 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
595 StoreP(src, SafepointRegisterSlot(dst));
596 }
597
598 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
599 LoadP(dst, SafepointRegisterSlot(src));
600 }
601
602 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
603 // The registers are pushed starting with the highest encoding,
604 // which means that lowest encodings are closest to the stack pointer.
605 RegList regs = kSafepointSavedRegisters;
606 int index = 0;
607
608 DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
609
610 for (int16_t i = 0; i < reg_code; i++) {
611 if ((regs & (1 << i)) != 0) {
612 index++;
613 }
614 }
615
616 return index;
617 }
618
619 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
620 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
621 }
622
623 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
624 // General purpose registers are pushed last on the stack.
625 const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
626 int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
627 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
628 return MemOperand(sp, doubles_size + register_offset);
629 }
630
631 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
632 const DoubleRegister src) {
633 // Turn potential sNaN into qNaN
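  // Subtracting +0 quiets a signalling NaN while leaving every other value
  // (including infinities and negative zero) unchanged.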
634 if (!dst.is(src)) ldr(dst, src);
635 lzdr(kDoubleRegZero);
636 sdbr(dst, kDoubleRegZero);
637 }
638
639 void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
640 cdfbr(dst, src);
641 }
642
643 void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
644 DoubleRegister dst) {
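  // With the floating point extension facility, convert the unsigned value
  // directly; otherwise zero-extend to 64 bits and use the signed 64-bit
  // conversion, which cannot overflow for a 32-bit input.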
645 if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
646 cdlfbr(Condition(5), Condition(0), dst, src);
647 } else {
648 // zero-extend src
649 llgfr(src, src);
650 // convert to double
651 cdgbr(dst, src);
652 }
653 }
654
655 void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
656 cefbr(Condition(4), dst, src);
657 }
658
659 void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
660 DoubleRegister dst) {
661 celfbr(Condition(4), Condition(0), dst, src);
662 }
663
664 #if V8_TARGET_ARCH_S390X
665 void MacroAssembler::ConvertInt64ToDouble(Register src,
666 DoubleRegister double_dst) {
667 cdgbr(double_dst, src);
668 }
669
670 void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
671 DoubleRegister double_dst) {
672 celgbr(Condition(0), Condition(0), double_dst, src);
673 }
674
675 void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
676 DoubleRegister double_dst) {
677 cdlgbr(Condition(0), Condition(0), double_dst, src);
678 }
679
680 void MacroAssembler::ConvertInt64ToFloat(Register src,
681 DoubleRegister double_dst) {
682 cegbr(double_dst, src);
683 }
684 #endif
685
686 void MacroAssembler::ConvertFloat32ToInt64(const DoubleRegister double_input,
687 #if !V8_TARGET_ARCH_S390X
688 const Register dst_hi,
689 #endif
690 const Register dst,
691 const DoubleRegister double_dst,
692 FPRoundingMode rounding_mode) {
693 Condition m = Condition(0);
694 switch (rounding_mode) {
695 case kRoundToZero:
696 m = Condition(5);
697 break;
698 case kRoundToNearest:
699 UNIMPLEMENTED();
700 break;
701 case kRoundToPlusInf:
702 m = Condition(6);
703 break;
704 case kRoundToMinusInf:
705 m = Condition(7);
706 break;
707 default:
708 UNIMPLEMENTED();
709 break;
710 }
711 cgebr(m, dst, double_input);
712 ldgr(double_dst, dst);
713 #if !V8_TARGET_ARCH_S390X
714 srlg(dst_hi, dst, Operand(32));
715 #endif
716 }
717
718 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
719 #if !V8_TARGET_ARCH_S390X
720 const Register dst_hi,
721 #endif
722 const Register dst,
723 const DoubleRegister double_dst,
724 FPRoundingMode rounding_mode) {
725 Condition m = Condition(0);
726 switch (rounding_mode) {
727 case kRoundToZero:
728 m = Condition(5);
729 break;
730 case kRoundToNearest:
731 UNIMPLEMENTED();
732 break;
733 case kRoundToPlusInf:
734 m = Condition(6);
735 break;
736 case kRoundToMinusInf:
737 m = Condition(7);
738 break;
739 default:
740 UNIMPLEMENTED();
741 break;
742 }
743 cgdbr(m, dst, double_input);
744 ldgr(double_dst, dst);
745 #if !V8_TARGET_ARCH_S390X
746 srlg(dst_hi, dst, Operand(32));
747 #endif
748 }
749
750 void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
751 const Register dst,
752 const DoubleRegister double_dst,
753 FPRoundingMode rounding_mode) {
754 Condition m = Condition(0);
755 switch (rounding_mode) {
756 case kRoundToZero:
757 m = Condition(5);
758 break;
759 case kRoundToNearest:
760 m = Condition(4);
761 break;
762 case kRoundToPlusInf:
763 m = Condition(6);
764 break;
765 case kRoundToMinusInf:
766 m = Condition(7);
767 break;
768 default:
769 UNIMPLEMENTED();
770 break;
771 }
772 cfebr(m, dst, double_input);
773 Label done;
774 b(Condition(0xe), &done, Label::kNear); // special case
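  // CC 3 here means the input was a NaN or out of range; in that case fall
  // through and force the integer result to zero.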
775 LoadImmP(dst, Operand::Zero());
776 bind(&done);
777 ldgr(double_dst, dst);
778 }
779
780 void MacroAssembler::ConvertFloat32ToUnsignedInt32(
781 const DoubleRegister double_input, const Register dst,
782 const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
783 Condition m = Condition(0);
784 switch (rounding_mode) {
785 case kRoundToZero:
786 m = Condition(5);
787 break;
788 case kRoundToNearest:
789 UNIMPLEMENTED();
790 break;
791 case kRoundToPlusInf:
792 m = Condition(6);
793 break;
794 case kRoundToMinusInf:
795 m = Condition(7);
796 break;
797 default:
798 UNIMPLEMENTED();
799 break;
800 }
801 clfebr(m, Condition(0), dst, double_input);
802 Label done;
803 b(Condition(0xe), &done, Label::kNear); // special case
804 LoadImmP(dst, Operand::Zero());
805 bind(&done);
806 ldgr(double_dst, dst);
807 }
808
809 #if V8_TARGET_ARCH_S390X
810 void MacroAssembler::ConvertFloat32ToUnsignedInt64(
811 const DoubleRegister double_input, const Register dst,
812 const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
813 Condition m = Condition(0);
814 switch (rounding_mode) {
815 case kRoundToZero:
816 m = Condition(5);
817 break;
818 case kRoundToNearest:
819 UNIMPLEMENTED();
820 break;
821 case kRoundToPlusInf:
822 m = Condition(6);
823 break;
824 case kRoundToMinusInf:
825 m = Condition(7);
826 break;
827 default:
828 UNIMPLEMENTED();
829 break;
830 }
831 clgebr(m, Condition(0), dst, double_input);
832 ldgr(double_dst, dst);
833 }
834
835 void MacroAssembler::ConvertDoubleToUnsignedInt64(
836 const DoubleRegister double_input, const Register dst,
837 const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
838 Condition m = Condition(0);
839 switch (rounding_mode) {
840 case kRoundToZero:
841 m = Condition(5);
842 break;
843 case kRoundToNearest:
844 UNIMPLEMENTED();
845 break;
846 case kRoundToPlusInf:
847 m = Condition(6);
848 break;
849 case kRoundToMinusInf:
850 m = Condition(7);
851 break;
852 default:
853 UNIMPLEMENTED();
854 break;
855 }
856 clgdbr(m, Condition(0), dst, double_input);
857 ldgr(double_dst, dst);
858 }
859
860 #endif
861
862 #if !V8_TARGET_ARCH_S390X
863 void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
864 Register src_low, Register src_high,
865 Register scratch, Register shift) {
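  // Treat the two 32-bit halves as one 64-bit value in the even/odd register
  // pair r0/r1, shift it as a unit, then split the result back into
  // dst_high/dst_low.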
866 LoadRR(r0, src_high);
867 LoadRR(r1, src_low);
868 sldl(r0, shift, Operand::Zero());
869 LoadRR(dst_high, r0);
870 LoadRR(dst_low, r1);
871 }
872
873 void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
874 Register src_low, Register src_high,
875 uint32_t shift) {
876 LoadRR(r0, src_high);
877 LoadRR(r1, src_low);
878 sldl(r0, r0, Operand(shift));
879 LoadRR(dst_high, r0);
880 LoadRR(dst_low, r1);
881 }
882
883 void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
884 Register src_low, Register src_high,
885 Register scratch, Register shift) {
886 LoadRR(r0, src_high);
887 LoadRR(r1, src_low);
888 srdl(r0, shift, Operand::Zero());
889 LoadRR(dst_high, r0);
890 LoadRR(dst_low, r1);
891 }
892
893 void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
894 Register src_low, Register src_high,
895 uint32_t shift) {
896 LoadRR(r0, src_high);
897 LoadRR(r1, src_low);
898 srdl(r0, r0, Operand(shift));
899 LoadRR(dst_high, r0);
900 LoadRR(dst_low, r1);
901 }
902
903 void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
904 Register src_low, Register src_high,
905 Register scratch, Register shift) {
906 LoadRR(r0, src_high);
907 LoadRR(r1, src_low);
908 srda(r0, shift, Operand::Zero());
909 LoadRR(dst_high, r0);
910 LoadRR(dst_low, r1);
911 }
912
913 void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
914 Register src_low, Register src_high,
915 uint32_t shift) {
916 LoadRR(r0, src_high);
917 LoadRR(r1, src_low);
918 srda(r0, r0, Operand(shift));
919 LoadRR(dst_high, r0);
920 LoadRR(dst_low, r1);
921 }
922 #endif
923
924 void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
925 lgdr(dst, src);
926 }
927
928 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
929 ldgr(dst, src);
930 }
931
932 void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
933 int prologue_offset) {
934 {
935 ConstantPoolUnavailableScope constant_pool_unavailable(this);
936 LoadSmiLiteral(r1, Smi::FromInt(type));
937 PushCommonFrame(r1);
938 }
939 }
940
941 void MacroAssembler::Prologue(bool code_pre_aging, Register base,
942 int prologue_offset) {
943 DCHECK(!base.is(no_reg));
944 {
945 PredictableCodeSizeScope predictible_code_size_scope(
946 this, kNoCodeAgeSequenceLength);
947 // The following instructions must remain together and unmodified
948 // for code aging to work properly.
949 if (code_pre_aging) {
950 // Pre-age the code.
951 // This matches the code found in PatchPlatformCodeAge()
952 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
953 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
954 nop();
955 CleanseP(r14);
956 Push(r14);
957 mov(r2, Operand(target));
958 Call(r2);
959 for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
960 i += 2) {
961 // TODO(joransiu): Create nop function to pad
962 // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
963 nop(); // 2-byte nops().
964 }
965 } else {
966 // This matches the code found in GetNoCodeAgeSequence()
967 PushStandardFrame(r3);
968 }
969 }
970 }
971
972 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
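  // Walk from the JSFunction of the current frame to its literals array and
  // from there to the type feedback vector.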
973 LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
974 LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
975 LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
976 }
977
978 void MacroAssembler::EnterFrame(StackFrame::Type type,
979 bool load_constant_pool_pointer_reg) {
980 // We create a stack frame with:
981 // Return Addr <-- old sp
982 // Old FP <-- new fp
983 // CP
984 // type
985 // CodeObject <-- new sp
986
987 LoadSmiLiteral(ip, Smi::FromInt(type));
988 PushCommonFrame(ip);
989
990 if (type == StackFrame::INTERNAL) {
991 mov(r0, Operand(CodeObject()));
992 push(r0);
993 }
994 }
995
996 int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
997 // Drop the execution stack down to the frame pointer and restore
998 // the caller frame pointer, return address and constant pool pointer.
999 LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1000 lay(r1, MemOperand(
1001 fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment));
1002 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1003 LoadRR(sp, r1);
1004 int frame_ends = pc_offset();
1005 return frame_ends;
1006 }
1007
1008 void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
1009 Register argc) {
1010 CleanseP(r14);
1011 Push(r14, fp, context, target);
1012 la(fp, MemOperand(sp, 2 * kPointerSize));
1013 Push(argc);
1014 }
1015
1016 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
1017 Register argc) {
1018 Pop(argc);
1019 Pop(r14, fp, context, target);
1020 }
1021
1022 // ExitFrame layout (probably wrongish.. needs updating)
1023 //
1024 // SP -> previousSP
1025 // LK reserved
1026 // code
1027 // sp_on_exit (for debug?)
1028 // oldSP->prev SP
1029 // LK
1030 // <parameters on stack>
1031
1032 // Prior to calling EnterExitFrame, we've got a bunch of parameters
1033 // on the stack that we need to wrap a real frame around.. so first
1034 // we reserve a slot for LK and push the previous SP which is captured
1035 // in the fp register (r11)
1036 // Then - we buy a new frame
1037
1038 // r14
1039 // oldFP <- newFP
1040 // SP
1041 // Code
1042 // Floats
1043 // gaps
1044 // Args
1045 // ABIRes <- newSP
1046 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1047 StackFrame::Type frame_type) {
1048 DCHECK(frame_type == StackFrame::EXIT ||
1049 frame_type == StackFrame::BUILTIN_EXIT);
1050 // Set up the frame structure on the stack.
1051 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1052 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1053 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1054 DCHECK(stack_space > 0);
1055
1056 // This is an opportunity to build a frame to wrap
1057 // all of the pushes that have happened inside of V8
1058 // since we were called from C code
1059 CleanseP(r14);
1060 LoadSmiLiteral(r1, Smi::FromInt(frame_type));
1061 PushCommonFrame(r1);
1062 // Reserve room for saved entry sp and code object.
1063 lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
1064
1065 if (emit_debug_code()) {
1066 StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
1067 }
1068 mov(r1, Operand(CodeObject()));
1069 StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1070
1071 // Save the frame pointer and the context in top.
1072 mov(r1, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1073 StoreP(fp, MemOperand(r1));
1074 mov(r1, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1075 StoreP(cp, MemOperand(r1));
1076
1077 // Optionally save all volatile double registers.
1078 if (save_doubles) {
1079 MultiPushDoubles(kCallerSavedDoubles);
1080 // Note that d0 will be accessible at
1081 // fp - ExitFrameConstants::kFrameSize -
1082 // kNumCallerSavedDoubles * kDoubleSize,
1083 // since the sp slot and code slot were pushed after the fp.
1084 }
1085
1086 lay(sp, MemOperand(sp, -stack_space * kPointerSize));
1087
1088 // Allocate and align the frame preparing for calling the runtime
1089 // function.
1090 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1091 if (frame_alignment > 0) {
1092 DCHECK(frame_alignment == 8);
1093 ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
1094 }
1095
1096 lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
1097 StoreP(MemOperand(sp), Operand::Zero(), r0);
1098 // Set the exit frame sp value to point just before the return address
1099 // location.
1100 lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
1101 StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
1102 }
1103
1104 void MacroAssembler::InitializeNewString(Register string, Register length,
1105 Heap::RootListIndex map_index,
1106 Register scratch1, Register scratch2) {
1107 SmiTag(scratch1, length);
1108 LoadRoot(scratch2, map_index);
1109 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset));
1110 StoreP(FieldMemOperand(string, String::kHashFieldSlot),
1111 Operand(String::kEmptyHashField), scratch1);
1112 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1113 }
1114
1115 int MacroAssembler::ActivationFrameAlignment() {
1116 #if !defined(USE_SIMULATOR)
1117 // Running on the real platform. Use the alignment as mandated by the local
1118 // environment.
1119 // Note: This will break if we ever start generating snapshots on one S390
1120 // platform for another S390 platform with a different alignment.
1121 return base::OS::ActivationFrameAlignment();
1122 #else // Simulated
1123 // If we are using the simulator then we should always align to the expected
1124 // alignment. As the simulator is used to generate snapshots we do not know
1125 // if the target platform will need alignment, so this is controlled from a
1126 // flag.
1127 return FLAG_sim_stack_alignment;
1128 #endif
1129 }
1130
1131 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1132 bool restore_context,
1133 bool argument_count_is_length) {
1134 // Optionally restore all double registers.
1135 if (save_doubles) {
1136 // Calculate the stack location of the saved doubles and restore them.
1137 const int kNumRegs = kNumCallerSavedDoubles;
1138 lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
1139 kNumRegs * kDoubleSize)));
1140 MultiPopDoubles(kCallerSavedDoubles, r5);
1141 }
1142
1143 // Clear top frame.
1144 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1145 StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
1146
1147 // Restore current context from top and clear it in debug mode.
1148 if (restore_context) {
1149 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1150 LoadP(cp, MemOperand(ip));
1151 }
1152 #ifdef DEBUG
1153 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1154 StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
1155 #endif
1156
1157 // Tear down the exit frame, pop the arguments, and return.
1158 LeaveFrame(StackFrame::EXIT);
1159
1160 if (argument_count.is_valid()) {
1161 if (!argument_count_is_length) {
1162 ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
1163 }
1164 la(sp, MemOperand(sp, argument_count));
1165 }
1166 }
1167
1168 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
1169 Move(dst, d0);
1170 }
1171
1172 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
1173 Move(dst, d0);
1174 }
1175
1176 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
1177 Register caller_args_count_reg,
1178 Register scratch0, Register scratch1) {
1179 #if DEBUG
1180 if (callee_args_count.is_reg()) {
1181 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1182 scratch1));
1183 } else {
1184 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1185 }
1186 #endif
1187
1188 // Calculate the end of the destination area where we will put the arguments
1189 // after we drop the current frame. We add kPointerSize to count the receiver
1190 // argument, which is not included in the formal parameter count.
1191 Register dst_reg = scratch0;
1192 ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
1193 AddP(dst_reg, fp, dst_reg);
1194 AddP(dst_reg, dst_reg,
1195 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1196
1197 Register src_reg = caller_args_count_reg;
1198 // Calculate the end of source area. +kPointerSize is for the receiver.
1199 if (callee_args_count.is_reg()) {
1200 ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
1201 AddP(src_reg, sp, src_reg);
1202 AddP(src_reg, src_reg, Operand(kPointerSize));
1203 } else {
1204 mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
1205 AddP(src_reg, src_reg, sp);
1206 }
1207
1208 if (FLAG_debug_code) {
1209 CmpLogicalP(src_reg, dst_reg);
1210 Check(lt, kStackAccessBelowStackPointer);
1211 }
1212
1213 // Restore caller's frame pointer and return address now as they will be
1214 // overwritten by the copying loop.
1215 RestoreFrameStateForTailCall();
1216
1217 // Now copy callee arguments to the caller frame going backwards to avoid
1218 // callee arguments corruption (source and destination areas could overlap).
1219
1220 // Both src_reg and dst_reg are pointing to the word after the one to copy,
1221 // so they must be pre-decremented in the loop.
1222 Register tmp_reg = scratch1;
1223 Label loop;
1224 if (callee_args_count.is_reg()) {
1225 AddP(tmp_reg, callee_args_count.reg(), Operand(1)); // +1 for receiver
1226 } else {
1227 mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
1228 }
1229 LoadRR(r1, tmp_reg);
1230 bind(&loop);
1231 LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
1232 StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
1233 lay(src_reg, MemOperand(src_reg, -kPointerSize));
1234 lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
1235 BranchOnCount(r1, &loop);
1236
1237 // Leave current frame.
1238 LoadRR(sp, dst_reg);
1239 }
1240
1241 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1242 const ParameterCount& actual, Label* done,
1243 bool* definitely_mismatches,
1244 InvokeFlag flag,
1245 const CallWrapper& call_wrapper) {
1246 bool definitely_matches = false;
1247 *definitely_mismatches = false;
1248 Label regular_invoke;
1249
1250 // Check whether the expected and actual arguments count match. If not,
1251 // setup registers according to contract with ArgumentsAdaptorTrampoline:
1252 // r2: actual arguments count
1253 // r3: function (passed through to callee)
1254 // r4: expected arguments count
1255
1256 // The code below is made a lot easier because the calling code already sets
1257 // up actual and expected registers according to the contract if values are
1258 // passed in registers.
1259
1260 // ARM has some sanity checks as per below; consider adding them for S390.
1261 // DCHECK(actual.is_immediate() || actual.reg().is(r2));
1262 // DCHECK(expected.is_immediate() || expected.reg().is(r4));
1263
1264 if (expected.is_immediate()) {
1265 DCHECK(actual.is_immediate());
1266 mov(r2, Operand(actual.immediate()));
1267 if (expected.immediate() == actual.immediate()) {
1268 definitely_matches = true;
1269 } else {
1270 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1271 if (expected.immediate() == sentinel) {
1272 // Don't worry about adapting arguments for builtins that
1273 // don't want that done. Skip adaption code by making it look
1274 // like we have a match between expected and actual number of
1275 // arguments.
1276 definitely_matches = true;
1277 } else {
1278 *definitely_mismatches = true;
1279 mov(r4, Operand(expected.immediate()));
1280 }
1281 }
1282 } else {
1283 if (actual.is_immediate()) {
1284 mov(r2, Operand(actual.immediate()));
1285 CmpPH(expected.reg(), Operand(actual.immediate()));
1286 beq(&regular_invoke);
1287 } else {
1288 CmpP(expected.reg(), actual.reg());
1289 beq(&regular_invoke);
1290 }
1291 }
1292
1293 if (!definitely_matches) {
1294 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
1295 if (flag == CALL_FUNCTION) {
1296 call_wrapper.BeforeCall(CallSize(adaptor));
1297 Call(adaptor);
1298 call_wrapper.AfterCall();
1299 if (!*definitely_mismatches) {
1300 b(done);
1301 }
1302 } else {
1303 Jump(adaptor, RelocInfo::CODE_TARGET);
1304 }
1305 bind(&regular_invoke);
1306 }
1307 }
1308
1309 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
1310 const ParameterCount& expected,
1311 const ParameterCount& actual) {
1312 Label skip_flooding;
1313 ExternalReference last_step_action =
1314 ExternalReference::debug_last_step_action_address(isolate());
1315 STATIC_ASSERT(StepFrame > StepIn);
1316 mov(r6, Operand(last_step_action));
1317 LoadB(r6, MemOperand(r6));
1318 CmpP(r6, Operand(StepIn));
1319 blt(&skip_flooding);
1320 {
1321 FrameScope frame(this,
1322 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1323 if (expected.is_reg()) {
1324 SmiTag(expected.reg());
1325 Push(expected.reg());
1326 }
1327 if (actual.is_reg()) {
1328 SmiTag(actual.reg());
1329 Push(actual.reg());
1330 }
1331 if (new_target.is_valid()) {
1332 Push(new_target);
1333 }
1334 Push(fun, fun);
1335 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
1336 Pop(fun);
1337 if (new_target.is_valid()) {
1338 Pop(new_target);
1339 }
1340 if (actual.is_reg()) {
1341 Pop(actual.reg());
1342 SmiUntag(actual.reg());
1343 }
1344 if (expected.is_reg()) {
1345 Pop(expected.reg());
1346 SmiUntag(expected.reg());
1347 }
1348 }
1349 bind(&skip_flooding);
1350 }
1351
1352 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1353 const ParameterCount& expected,
1354 const ParameterCount& actual,
1355 InvokeFlag flag,
1356 const CallWrapper& call_wrapper) {
1357 // You can't call a function without a valid frame.
1358 DCHECK(flag == JUMP_FUNCTION || has_frame());
1359
1360 DCHECK(function.is(r3));
1361 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
1362
1363 if (call_wrapper.NeedsDebugStepCheck()) {
1364 FloodFunctionIfStepping(function, new_target, expected, actual);
1365 }
1366
1367 // Clear the new.target register if not given.
1368 if (!new_target.is_valid()) {
1369 LoadRoot(r5, Heap::kUndefinedValueRootIndex);
1370 }
1371
1372 Label done;
1373 bool definitely_mismatches = false;
1374 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1375 call_wrapper);
1376 if (!definitely_mismatches) {
1377 // We call indirectly through the code field in the function to
1378 // allow recompilation to take effect without changing any of the
1379 // call sites.
1380 Register code = ip;
1381 LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1382 if (flag == CALL_FUNCTION) {
1383 call_wrapper.BeforeCall(CallSize(code));
1384 CallJSEntry(code);
1385 call_wrapper.AfterCall();
1386 } else {
1387 DCHECK(flag == JUMP_FUNCTION);
1388 JumpToJSEntry(code);
1389 }
1390
1391 // Continue here if InvokePrologue does handle the invocation due to
1392 // mismatched parameter counts.
1393 bind(&done);
1394 }
1395 }
1396
1397 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1398 const ParameterCount& actual,
1399 InvokeFlag flag,
1400 const CallWrapper& call_wrapper) {
1401 // You can't call a function without a valid frame.
1402 DCHECK(flag == JUMP_FUNCTION || has_frame());
1403
1404 // Contract with called JS functions requires that function is passed in r3.
1405 DCHECK(fun.is(r3));
1406
1407 Register expected_reg = r4;
1408 Register temp_reg = r6;
1409 LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
1410 LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
1411 LoadW(expected_reg,
1412 FieldMemOperand(temp_reg,
1413 SharedFunctionInfo::kFormalParameterCountOffset));
1414 #if !defined(V8_TARGET_ARCH_S390X)
1415 SmiUntag(expected_reg);
1416 #endif
1417
1418 ParameterCount expected(expected_reg);
1419 InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
1420 }
1421
1422 void MacroAssembler::InvokeFunction(Register function,
1423 const ParameterCount& expected,
1424 const ParameterCount& actual,
1425 InvokeFlag flag,
1426 const CallWrapper& call_wrapper) {
1427 // You can't call a function without a valid frame.
1428 DCHECK(flag == JUMP_FUNCTION || has_frame());
1429
1430 // Contract with called JS functions requires that function is passed in r3.
1431 DCHECK(function.is(r3));
1432
1433 // Get the function and setup the context.
1434 LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
1435
1436 InvokeFunctionCode(r3, no_reg, expected, actual, flag, call_wrapper);
1437 }
1438
1439 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1440 const ParameterCount& expected,
1441 const ParameterCount& actual,
1442 InvokeFlag flag,
1443 const CallWrapper& call_wrapper) {
1444 Move(r3, function);
1445 InvokeFunction(r3, expected, actual, flag, call_wrapper);
1446 }
1447
1448 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
1449 Label* fail) {
1450 DCHECK(kNotStringTag != 0);
1451
1452 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1453 LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1454 mov(r0, Operand(kIsNotStringMask));
1455 AndP(r0, scratch);
1456 bne(fail);
1457 }
1458
1459 void MacroAssembler::IsObjectNameType(Register object, Register scratch,
1460 Label* fail) {
1461 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1462 LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1463 CmpP(scratch, Operand(LAST_NAME_TYPE));
1464 bgt(fail);
1465 }
1466
1467 void MacroAssembler::DebugBreak() {
1468 LoadImmP(r2, Operand::Zero());
1469 mov(r3,
1470 Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
1471 CEntryStub ces(isolate(), 1);
1472 DCHECK(AllowThisStubCall(&ces));
1473 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
1474 }
1475
1476 void MacroAssembler::PushStackHandler() {
1477 // Adjust this code if not the case.
1478 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1479 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1480
1481 // Link the current handler as the next handler.
1482 mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1483
1484 // Buy the full stack frame for 5 slots.
1485 lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
1486
1487 // Copy the old handler into the next handler slot.
1488 mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
1489 kPointerSize);
1490 // Set this new handler as the current one.
1491 StoreP(sp, MemOperand(r7));
1492 }
1493
1494 void MacroAssembler::PopStackHandler() {
1495 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1496 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1497
1498 // Pop the Next Handler into r3 and store it into Handler Address reference.
1499 Pop(r3);
1500 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1501
1502 StoreP(r3, MemOperand(ip));
1503 }
1504
1505 // Compute the hash code from the untagged key. This must be kept in sync with
1506 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1507 // code-stub-hydrogen.cc
1508 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1509 // First of all we assign the hash seed to scratch.
1510 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1511 SmiUntag(scratch);
1512
1513 // Xor original key with a seed.
1514 XorP(t0, scratch);
1515
1516 // Compute the hash code from the untagged key. This must be kept in sync
1517 // with ComputeIntegerHash in utils.h.
1518 //
1519 // hash = ~hash + (hash << 15);
1520 LoadRR(scratch, t0);
1521 NotP(scratch);
1522 sll(t0, Operand(15));
1523 AddP(t0, scratch, t0);
1524 // hash = hash ^ (hash >> 12);
1525 ShiftRight(scratch, t0, Operand(12));
1526 XorP(t0, scratch);
1527 // hash = hash + (hash << 2);
1528 ShiftLeft(scratch, t0, Operand(2));
1529 AddP(t0, t0, scratch);
1530 // hash = hash ^ (hash >> 4);
1531 ShiftRight(scratch, t0, Operand(4));
1532 XorP(t0, scratch);
1533 // hash = hash * 2057;
1534 LoadRR(r0, t0);
1535 ShiftLeft(scratch, t0, Operand(3));
1536 AddP(t0, t0, scratch);
1537 ShiftLeft(scratch, r0, Operand(11));
1538 AddP(t0, t0, scratch);
1539 // hash = hash ^ (hash >> 16);
1540 ShiftRight(scratch, t0, Operand(16));
1541 XorP(t0, scratch);
1542 // hash & 0x3fffffff
1543 ExtractBitRange(t0, t0, 29, 0);
1544 }
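
// For reference, the same hash written as plain C++ (a sketch assuming 32-bit
// unsigned arithmetic; it mirrors the instruction sequence above, where the
// multiply by 2057 is expanded as hash + (hash << 3) + (hash << 11)):
//
//   uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;
//     hash = hash ^ (hash >> 16);
//     return hash & 0x3fffffff;  // ExtractBitRange(t0, t0, 29, 0)
//   }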
1545
Allocate(int object_size,Register result,Register scratch1,Register scratch2,Label * gc_required,AllocationFlags flags)1546 void MacroAssembler::Allocate(int object_size, Register result,
1547 Register scratch1, Register scratch2,
1548 Label* gc_required, AllocationFlags flags) {
1549 DCHECK(object_size <= kMaxRegularHeapObjectSize);
1550 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1551 if (!FLAG_inline_new) {
1552 if (emit_debug_code()) {
1553 // Trash the registers to simulate an allocation failure.
1554 LoadImmP(result, Operand(0x7091));
1555 LoadImmP(scratch1, Operand(0x7191));
1556 LoadImmP(scratch2, Operand(0x7291));
1557 }
1558 b(gc_required);
1559 return;
1560 }
1561
1562 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1563
1564 // Make object size into bytes.
1565 if ((flags & SIZE_IN_WORDS) != 0) {
1566 object_size *= kPointerSize;
1567 }
1568 DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
1569
1570 // Check relative positions of allocation top and limit addresses.
1571 ExternalReference allocation_top =
1572 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1573 ExternalReference allocation_limit =
1574 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1575
1576 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1577 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1578 DCHECK((limit - top) == kPointerSize);
1579
1580 // Set up allocation top address register.
1581 Register top_address = scratch1;
1582 // This code stores a temporary value in ip. This is OK, as the code below
1583 // does not need ip for implicit literal generation.
1584 Register alloc_limit = ip;
1585 Register result_end = scratch2;
1586 mov(top_address, Operand(allocation_top));
1587
1588 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1589 // Load allocation top into result and allocation limit into ip.
1590 LoadP(result, MemOperand(top_address));
1591 LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1592 } else {
1593 if (emit_debug_code()) {
1594 // Assert that result actually contains top on entry.
1595 LoadP(alloc_limit, MemOperand(top_address));
1596 CmpP(result, alloc_limit);
1597 Check(eq, kUnexpectedAllocationTop);
1598 }
1599 // Load allocation limit. Result already contains allocation top.
1600 LoadP(alloc_limit, MemOperand(top_address, limit - top));
1601 }
1602
1603 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1604 // Align the next allocation. Storing the filler map without checking top is
1605 // safe in new-space because the limit of the heap is aligned there.
1606 #if V8_TARGET_ARCH_S390X
1607 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1608 #else
1609 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1610 AndP(result_end, result, Operand(kDoubleAlignmentMask));
1611 Label aligned;
1612 beq(&aligned, Label::kNear);
1613 if ((flags & PRETENURE) != 0) {
1614 CmpLogicalP(result, alloc_limit);
1615 bge(gc_required);
1616 }
1617 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1618 StoreW(result_end, MemOperand(result));
1619 AddP(result, result, Operand(kDoubleSize / 2));
1620 bind(&aligned);
1621 #endif
1622 }
1623
1624 // Calculate new top and bail out if new space is exhausted. Use result
1625 // to calculate the new top.
1626 SubP(r0, alloc_limit, result);
1627 if (is_int16(object_size)) {
1628 CmpP(r0, Operand(object_size));
1629 blt(gc_required);
1630 AddP(result_end, result, Operand(object_size));
1631 } else {
1632 mov(result_end, Operand(object_size));
1633 CmpP(r0, result_end);
1634 blt(gc_required);
1635 AddP(result_end, result, result_end);
1636 }
1637
1638 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1639 // The top pointer is not updated for allocation folding dominators.
1640 StoreP(result_end, MemOperand(top_address));
1641 }
1642
1643 // Tag object.
1644 AddP(result, result, Operand(kHeapObjectTag));
1645 }
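
// The fast path above is a bump-pointer allocation. A rough C++ sketch of the
// logic (illustrative only; top/limit stand for the values behind the
// allocation_top and allocation_limit external references):
//
//   char* TryAllocate(char** top, char* limit, size_t size_in_bytes) {
//     if (static_cast<size_t>(limit - *top) < size_in_bytes)
//       return nullptr;                  // -> gc_required
//     char* result = *top;
//     *top += size_in_bytes;             // StoreP(result_end, MemOperand(top_address))
//     return result + kHeapObjectTag;    // tag the result as a heap object
//   }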
1646
Allocate(Register object_size,Register result,Register result_end,Register scratch,Label * gc_required,AllocationFlags flags)1647 void MacroAssembler::Allocate(Register object_size, Register result,
1648 Register result_end, Register scratch,
1649 Label* gc_required, AllocationFlags flags) {
1650 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1651 if (!FLAG_inline_new) {
1652 if (emit_debug_code()) {
1653 // Trash the registers to simulate an allocation failure.
1654 LoadImmP(result, Operand(0x7091));
1655 LoadImmP(scratch, Operand(0x7191));
1656 LoadImmP(result_end, Operand(0x7291));
1657 }
1658 b(gc_required);
1659 return;
1660 }
1661
1662 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1663 // is not specified. Other registers must not overlap.
1664 DCHECK(!AreAliased(object_size, result, scratch, ip));
1665 DCHECK(!AreAliased(result_end, result, scratch, ip));
1666 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1667
1668 // Check relative positions of allocation top and limit addresses.
1669 ExternalReference allocation_top =
1670 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1671 ExternalReference allocation_limit =
1672 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1673 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1674 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1675 DCHECK((limit - top) == kPointerSize);
1676
1677 // Set up allocation top address and allocation limit registers.
1678 Register top_address = scratch;
1679 // This code stores a temporary value in ip. This is OK, as the code below
1680 // does not need ip for implicit literal generation.
1681 Register alloc_limit = ip;
1682 mov(top_address, Operand(allocation_top));
1683
1684 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1685 // Load allocation top into result and allocation limit into alloc_limit.
1686 LoadP(result, MemOperand(top_address));
1687 LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1688 } else {
1689 if (emit_debug_code()) {
1690 // Assert that result actually contains top on entry.
1691 LoadP(alloc_limit, MemOperand(top_address));
1692 CmpP(result, alloc_limit);
1693 Check(eq, kUnexpectedAllocationTop);
1694 }
1695 // Load allocation limit. Result already contains allocation top.
1696 LoadP(alloc_limit, MemOperand(top_address, limit - top));
1697 }
1698
1699 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1700 // Align the next allocation. Storing the filler map without checking top is
1701 // safe in new-space because the limit of the heap is aligned there.
1702 #if V8_TARGET_ARCH_S390X
1703 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1704 #else
1705 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1706 AndP(result_end, result, Operand(kDoubleAlignmentMask));
1707 Label aligned;
1708 beq(&aligned, Label::kNear);
1709 if ((flags & PRETENURE) != 0) {
1710 CmpLogicalP(result, alloc_limit);
1711 bge(gc_required);
1712 }
1713 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1714 StoreW(result_end, MemOperand(result));
1715 AddP(result, result, Operand(kDoubleSize / 2));
1716 bind(&aligned);
1717 #endif
1718 }
1719
1720 // Calculate new top and bail out if new space is exhausted. Use result
1721 // to calculate the new top. Object size may be in words so a shift is
1722 // required to get the number of bytes.
1723 SubP(r0, alloc_limit, result);
1724 if ((flags & SIZE_IN_WORDS) != 0) {
1725 ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
1726 CmpP(r0, result_end);
1727 blt(gc_required);
1728 AddP(result_end, result, result_end);
1729 } else {
1730 CmpP(r0, object_size);
1731 blt(gc_required);
1732 AddP(result_end, result, object_size);
1733 }
1734
1735 // Update allocation top. result temporarily holds the new top.
1736 if (emit_debug_code()) {
1737 AndP(r0, result_end, Operand(kObjectAlignmentMask));
1738 Check(eq, kUnalignedAllocationInNewSpace, cr0);
1739 }
1740 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1741 // The top pointer is not updated for allocation folding dominators.
1742 StoreP(result_end, MemOperand(top_address));
1743 }
1744
1745 // Tag object.
1746 AddP(result, result, Operand(kHeapObjectTag));
1747 }
1748
FastAllocate(Register object_size,Register result,Register result_end,Register scratch,AllocationFlags flags)1749 void MacroAssembler::FastAllocate(Register object_size, Register result,
1750 Register result_end, Register scratch,
1751 AllocationFlags flags) {
1752 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1753 // is not specified. Other registers must not overlap.
1754 DCHECK(!AreAliased(object_size, result, scratch, ip));
1755 DCHECK(!AreAliased(result_end, result, scratch, ip));
1756 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1757
1758 ExternalReference allocation_top =
1759 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1760
1761 Register top_address = scratch;
1762 mov(top_address, Operand(allocation_top));
1763 LoadP(result, MemOperand(top_address));
1764
1765 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1766 // Align the next allocation. Storing the filler map without checking top is
1767 // safe in new-space because the limit of the heap is aligned there.
1768 #if V8_TARGET_ARCH_S390X
1769 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1770 #else
1771 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1772 AndP(result_end, result, Operand(kDoubleAlignmentMask));
1773 Label aligned;
1774 beq(&aligned, Label::kNear);
1775 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1776 StoreW(result_end, MemOperand(result));
1777 AddP(result, result, Operand(kDoubleSize / 2));
1778 bind(&aligned);
1779 #endif
1780 }
1781
1782 // Calculate new top using result. Object size may be in words so a shift is
1783 // required to get the number of bytes.
1784 if ((flags & SIZE_IN_WORDS) != 0) {
1785 ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
1786 AddP(result_end, result, result_end);
1787 } else {
1788 AddP(result_end, result, object_size);
1789 }
1790
1791 // Update allocation top. result temporarily holds the new top.
1792 if (emit_debug_code()) {
1793 AndP(r0, result_end, Operand(kObjectAlignmentMask));
1794 Check(eq, kUnalignedAllocationInNewSpace, cr0);
1795 }
1796 StoreP(result_end, MemOperand(top_address));
1797
1798 // Tag object.
1799 AddP(result, result, Operand(kHeapObjectTag));
1800 }
1801
FastAllocate(int object_size,Register result,Register scratch1,Register scratch2,AllocationFlags flags)1802 void MacroAssembler::FastAllocate(int object_size, Register result,
1803 Register scratch1, Register scratch2,
1804 AllocationFlags flags) {
1805 DCHECK(object_size <= kMaxRegularHeapObjectSize);
1806 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1807
1808 // Make object size into bytes.
1809 if ((flags & SIZE_IN_WORDS) != 0) {
1810 object_size *= kPointerSize;
1811 }
1812 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
1813
1814 ExternalReference allocation_top =
1815 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1816
1817 // Set up allocation top address register.
1818 Register top_address = scratch1;
1819 Register result_end = scratch2;
1820 mov(top_address, Operand(allocation_top));
1821 LoadP(result, MemOperand(top_address));
1822
1823 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1824 // Align the next allocation. Storing the filler map without checking top is
1825 // safe in new-space because the limit of the heap is aligned there.
1826 #if V8_TARGET_ARCH_S390X
1827 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1828 #else
1829 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1830 AndP(result_end, result, Operand(kDoubleAlignmentMask));
1831 Label aligned;
1832 beq(&aligned, Label::kNear);
1833 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1834 StoreW(result_end, MemOperand(result));
1835 AddP(result, result, Operand(kDoubleSize / 2));
1836 bind(&aligned);
1837 #endif
1838 }
1839
1840 // Calculate new top using result.
1841 AddP(result_end, result, Operand(object_size));
1842
1843 // The top pointer is not updated for allocation folding dominators.
1844 StoreP(result_end, MemOperand(top_address));
1845
1846 // Tag object.
1847 AddP(result, result, Operand(kHeapObjectTag));
1848 }
1849
AllocateTwoByteString(Register result,Register length,Register scratch1,Register scratch2,Register scratch3,Label * gc_required)1850 void MacroAssembler::AllocateTwoByteString(Register result, Register length,
1851 Register scratch1, Register scratch2,
1852 Register scratch3,
1853 Label* gc_required) {
1854 // Calculate the number of bytes needed for the characters in the string while
1855 // observing object alignment.
1856 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1857
1858 ShiftLeftP(scratch1, length, Operand(1)); // Length in bytes, not chars.
1859 AddP(scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1860
1861 AndP(scratch1, Operand(~kObjectAlignmentMask));
1862
1863 // Allocate two-byte string in new space.
1864 Allocate(scratch1, result, scratch2, scratch3, gc_required,
1865 NO_ALLOCATION_FLAGS);
1866
1867 // Set the map, length and hash field.
1868 InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
1869 scratch2);
1870 }
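
// Sketch of the size computation above (illustrative): the allocation size is
// two bytes per character plus the header, rounded up to the object alignment.
//
//   size_t TwoByteStringAllocationSize(size_t length) {
//     size_t size = 2 * length + SeqTwoByteString::kHeaderSize;
//     return (size + kObjectAlignmentMask) & ~kObjectAlignmentMask;  // round up
//   }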
1871
AllocateOneByteString(Register result,Register length,Register scratch1,Register scratch2,Register scratch3,Label * gc_required)1872 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1873 Register scratch1, Register scratch2,
1874 Register scratch3,
1875 Label* gc_required) {
1876 // Calculate the number of bytes needed for the characters in the string while
1877 // observing object alignment.
1878 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1879 DCHECK(kCharSize == 1);
1880 AddP(scratch1, length,
1881 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1882 AndP(scratch1, Operand(~kObjectAlignmentMask));
1883
1884 // Allocate one-byte string in new space.
1885 Allocate(scratch1, result, scratch2, scratch3, gc_required,
1886 NO_ALLOCATION_FLAGS);
1887
1888 // Set the map, length and hash field.
1889 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
1890 scratch1, scratch2);
1891 }
1892
AllocateTwoByteConsString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)1893 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
1894 Register scratch1,
1895 Register scratch2,
1896 Label* gc_required) {
1897 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1898 NO_ALLOCATION_FLAGS);
1899
1900 InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
1901 scratch2);
1902 }
1903
AllocateOneByteConsString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)1904 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
1905 Register scratch1,
1906 Register scratch2,
1907 Label* gc_required) {
1908 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1909 NO_ALLOCATION_FLAGS);
1910
1911 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
1912 scratch1, scratch2);
1913 }
1914
AllocateTwoByteSlicedString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)1915 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1916 Register length,
1917 Register scratch1,
1918 Register scratch2,
1919 Label* gc_required) {
1920 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1921 NO_ALLOCATION_FLAGS);
1922
1923 InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
1924 scratch2);
1925 }
1926
AllocateOneByteSlicedString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)1927 void MacroAssembler::AllocateOneByteSlicedString(Register result,
1928 Register length,
1929 Register scratch1,
1930 Register scratch2,
1931 Label* gc_required) {
1932 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1933 NO_ALLOCATION_FLAGS);
1934
1935 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
1936 scratch1, scratch2);
1937 }
1938
CompareObjectType(Register object,Register map,Register type_reg,InstanceType type)1939 void MacroAssembler::CompareObjectType(Register object, Register map,
1940 Register type_reg, InstanceType type) {
1941 const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
1942
1943 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1944 CompareInstanceType(map, temp, type);
1945 }
1946
CompareInstanceType(Register map,Register type_reg,InstanceType type)1947 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1948 InstanceType type) {
1949 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1950 STATIC_ASSERT(LAST_TYPE < 256);
1951 LoadlB(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1952 CmpP(type_reg, Operand(type));
1953 }
1954
CompareRoot(Register obj,Heap::RootListIndex index)1955 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
1956 CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
1957 }
1958
CheckFastObjectElements(Register map,Register scratch,Label * fail)1959 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
1960 Label* fail) {
1961 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1962 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1963 STATIC_ASSERT(FAST_ELEMENTS == 2);
1964 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1965 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
1966 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1967 ble(fail);
1968 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
1969 Operand(Map::kMaximumBitField2FastHoleyElementValue));
1970 bgt(fail);
1971 }
1972
CheckFastSmiElements(Register map,Register scratch,Label * fail)1973 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
1974 Label* fail) {
1975 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1976 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1977 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
1978 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1979 bgt(fail);
1980 }
1981
SmiToDouble(DoubleRegister value,Register smi)1982 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
1983 SmiUntag(ip, smi);
1984 ConvertIntToDouble(ip, value);
1985 }
StoreNumberToDoubleElements(Register value_reg,Register key_reg,Register elements_reg,Register scratch1,DoubleRegister double_scratch,Label * fail,int elements_offset)1986 void MacroAssembler::StoreNumberToDoubleElements(
1987 Register value_reg, Register key_reg, Register elements_reg,
1988 Register scratch1, DoubleRegister double_scratch, Label* fail,
1989 int elements_offset) {
1990 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
1991 Label smi_value, store;
1992
1993 // Handle smi values specially.
1994 JumpIfSmi(value_reg, &smi_value);
1995
1996 // Ensure that the object is a heap number
1997 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
1998 DONT_DO_SMI_CHECK);
1999
2000 LoadDouble(double_scratch,
2001 FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2002 // Force a canonical NaN.
2003 CanonicalizeNaN(double_scratch);
2004 b(&store);
2005
2006 bind(&smi_value);
2007 SmiToDouble(double_scratch, value_reg);
2008
2009 bind(&store);
2010 SmiToDoubleArrayOffset(scratch1, key_reg);
2011 StoreDouble(double_scratch,
2012 FieldMemOperand(elements_reg, scratch1,
2013 FixedDoubleArray::kHeaderSize - elements_offset));
2014 }
2015
CompareMap(Register obj,Register scratch,Handle<Map> map,Label * early_success)2016 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
2017 Label* early_success) {
2018 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2019 CompareMap(obj, map, early_success);
2020 }
2021
CompareMap(Register obj_map,Handle<Map> map,Label * early_success)2022 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
2023 Label* early_success) {
2024 mov(r0, Operand(map));
2025 CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset));
2026 }
2027
CheckMap(Register obj,Register scratch,Handle<Map> map,Label * fail,SmiCheckType smi_check_type)2028 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
2029 Label* fail, SmiCheckType smi_check_type) {
2030 if (smi_check_type == DO_SMI_CHECK) {
2031 JumpIfSmi(obj, fail);
2032 }
2033
2034 Label success;
2035 CompareMap(obj, scratch, map, &success);
2036 bne(fail);
2037 bind(&success);
2038 }
2039
CheckMap(Register obj,Register scratch,Heap::RootListIndex index,Label * fail,SmiCheckType smi_check_type)2040 void MacroAssembler::CheckMap(Register obj, Register scratch,
2041 Heap::RootListIndex index, Label* fail,
2042 SmiCheckType smi_check_type) {
2043 if (smi_check_type == DO_SMI_CHECK) {
2044 JumpIfSmi(obj, fail);
2045 }
2046 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2047 CompareRoot(scratch, index);
2048 bne(fail);
2049 }
2050
DispatchWeakMap(Register obj,Register scratch1,Register scratch2,Handle<WeakCell> cell,Handle<Code> success,SmiCheckType smi_check_type)2051 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2052 Register scratch2, Handle<WeakCell> cell,
2053 Handle<Code> success,
2054 SmiCheckType smi_check_type) {
2055 Label fail;
2056 if (smi_check_type == DO_SMI_CHECK) {
2057 JumpIfSmi(obj, &fail);
2058 }
2059 LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2060 CmpWeakValue(scratch1, cell, scratch2);
2061 Jump(success, RelocInfo::CODE_TARGET, eq);
2062 bind(&fail);
2063 }
2064
CmpWeakValue(Register value,Handle<WeakCell> cell,Register scratch,CRegister)2065 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2066 Register scratch, CRegister) {
2067 mov(scratch, Operand(cell));
2068 CmpP(value, FieldMemOperand(scratch, WeakCell::kValueOffset));
2069 }
2070
GetWeakValue(Register value,Handle<WeakCell> cell)2071 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2072 mov(value, Operand(cell));
2073 LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
2074 }
2075
LoadWeakValue(Register value,Handle<WeakCell> cell,Label * miss)2076 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2077 Label* miss) {
2078 GetWeakValue(value, cell);
2079 JumpIfSmi(value, miss);
2080 }
2081
GetMapConstructor(Register result,Register map,Register temp,Register temp2)2082 void MacroAssembler::GetMapConstructor(Register result, Register map,
2083 Register temp, Register temp2) {
2084 Label done, loop;
2085 LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2086 bind(&loop);
2087 JumpIfSmi(result, &done);
2088 CompareObjectType(result, temp, temp2, MAP_TYPE);
2089 bne(&done);
2090 LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2091 b(&loop);
2092 bind(&done);
2093 }
2094
TryGetFunctionPrototype(Register function,Register result,Register scratch,Label * miss)2095 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2096 Register scratch, Label* miss) {
2097 // Get the prototype or initial map from the function.
2098 LoadP(result,
2099 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2100
2101 // If the prototype or initial map is the hole, don't return it and
2102 // simply miss the cache instead. This will allow us to allocate a
2103 // prototype object on-demand in the runtime system.
2104 CompareRoot(result, Heap::kTheHoleValueRootIndex);
2105 beq(miss);
2106
2107 // If the function does not have an initial map, we're done.
2108 Label done;
2109 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2110 bne(&done, Label::kNear);
2111
2112 // Get the prototype from the initial map.
2113 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2114
2115 // All done.
2116 bind(&done);
2117 }
2118
CallStub(CodeStub * stub,TypeFeedbackId ast_id,Condition cond)2119 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
2120 Condition cond) {
2121 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2122 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2123 }
2124
TailCallStub(CodeStub * stub,Condition cond)2125 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2126 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2127 }
2128
AllowThisStubCall(CodeStub * stub)2129 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2130 return has_frame_ || !stub->SometimesSetsUpAFrame();
2131 }
2132
TestDoubleIsInt32(DoubleRegister double_input,Register scratch1,Register scratch2,DoubleRegister double_scratch)2133 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
2134 Register scratch1, Register scratch2,
2135 DoubleRegister double_scratch) {
2136 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
2137 }
2138
TestDoubleIsMinusZero(DoubleRegister input,Register scratch1,Register scratch2)2139 void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
2140 Register scratch1,
2141 Register scratch2) {
2142 lgdr(scratch1, input);
2143 #if V8_TARGET_ARCH_S390X
2144 llihf(scratch2, Operand(0x80000000)); // scratch2 = 0x80000000_00000000
2145 CmpP(scratch1, scratch2);
2146 #else
2147 Label done;
2148 CmpP(scratch1, Operand::Zero());
2149 bne(&done, Label::kNear);
2150
2151 srlg(scratch1, scratch1, Operand(32));
2152 CmpP(scratch1, Operand(HeapNumber::kSignMask));
2153 bind(&done);
2154 #endif
2155 }
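
// Sketch of the check above (illustrative): -0.0 is the unique double whose
// raw bit pattern is 0x8000000000000000, which is what lgdr + the compare test.
//
//   bool IsMinusZero(double d) {  // needs <cstring>, <cstdint>
//     uint64_t bits;
//     std::memcpy(&bits, &d, sizeof(bits));
//     return bits == 0x8000000000000000ull;
//   }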
2156
TestDoubleSign(DoubleRegister input,Register scratch)2157 void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
2158 lgdr(scratch, input);
2159 cgfi(scratch, Operand::Zero());
2160 }
2161
TestHeapNumberSign(Register input,Register scratch)2162 void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
2163 LoadlW(scratch, FieldMemOperand(input, HeapNumber::kValueOffset +
2164 Register::kExponentOffset));
2165 Cmp32(scratch, Operand::Zero());
2166 }
2167
TryDoubleToInt32Exact(Register result,DoubleRegister double_input,Register scratch,DoubleRegister double_scratch)2168 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2169 DoubleRegister double_input,
2170 Register scratch,
2171 DoubleRegister double_scratch) {
2172 Label done;
2173 DCHECK(!double_input.is(double_scratch));
2174
2175 ConvertDoubleToInt64(double_input,
2176 #if !V8_TARGET_ARCH_S390X
2177 scratch,
2178 #endif
2179 result, double_scratch);
2180
2181 #if V8_TARGET_ARCH_S390X
2182 TestIfInt32(result, r0);
2183 #else
2184 TestIfInt32(scratch, result, r0);
2185 #endif
2186 bne(&done);
2187
2188 // Convert back and compare.
2189 lgdr(scratch, double_scratch);
2190 cdfbr(double_scratch, scratch);
2191 cdbr(double_scratch, double_input);
2192 bind(&done);
2193 }
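
// The "exactness" idiom above, as a C++ sketch (illustrative; assumes d is
// finite and in int64 range, cases the hardware conversion handles itself):
// truncate, check the result fits in int32, then convert back and compare.
//
//   bool DoubleToInt32Exact(double d, int32_t* out) {
//     int64_t i = static_cast<int64_t>(d);              // ConvertDoubleToInt64
//     if (i != static_cast<int32_t>(i)) return false;   // TestIfInt32
//     if (static_cast<double>(i) != d) return false;    // convert back and compare
//     *out = static_cast<int32_t>(i);
//     return true;
//   }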
2194
TryInt32Floor(Register result,DoubleRegister double_input,Register input_high,Register scratch,DoubleRegister double_scratch,Label * done,Label * exact)2195 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
2196 Register input_high, Register scratch,
2197 DoubleRegister double_scratch, Label* done,
2198 Label* exact) {
2199 DCHECK(!result.is(input_high));
2200 DCHECK(!double_input.is(double_scratch));
2201 Label exception;
2202
2203 // Move high word into input_high
2204 lay(sp, MemOperand(sp, -kDoubleSize));
2205 StoreDouble(double_input, MemOperand(sp));
2206 LoadlW(input_high, MemOperand(sp, Register::kExponentOffset));
2207 la(sp, MemOperand(sp, kDoubleSize));
2208
2209 // Test for NaN/Inf
2210 ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
2211 CmpLogicalP(result, Operand(0x7ff));
2212 beq(&exception);
2213
2214 // Convert (rounding to -Inf)
2215 ConvertDoubleToInt64(double_input,
2216 #if !V8_TARGET_ARCH_S390X
2217 scratch,
2218 #endif
2219 result, double_scratch, kRoundToMinusInf);
2220
2221 // Test for overflow
2222 #if V8_TARGET_ARCH_S390X
2223 TestIfInt32(result, r0);
2224 #else
2225 TestIfInt32(scratch, result, r0);
2226 #endif
2227 bne(&exception);
2228
2229 // Test for exactness
2230 lgdr(scratch, double_scratch);
2231 cdfbr(double_scratch, scratch);
2232 cdbr(double_scratch, double_input);
2233 beq(exact);
2234 b(done);
2235
2236 bind(&exception);
2237 }
2238
TryInlineTruncateDoubleToI(Register result,DoubleRegister double_input,Label * done)2239 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2240 DoubleRegister double_input,
2241 Label* done) {
2242 DoubleRegister double_scratch = kScratchDoubleReg;
2243 #if !V8_TARGET_ARCH_S390X
2244 Register scratch = ip;
2245 #endif
2246
2247 ConvertDoubleToInt64(double_input,
2248 #if !V8_TARGET_ARCH_S390X
2249 scratch,
2250 #endif
2251 result, double_scratch);
2252
2253 // Test for overflow
2254 #if V8_TARGET_ARCH_S390X
2255 TestIfInt32(result, r0);
2256 #else
2257 TestIfInt32(scratch, result, r0);
2258 #endif
2259 beq(done);
2260 }
2261
TruncateDoubleToI(Register result,DoubleRegister double_input)2262 void MacroAssembler::TruncateDoubleToI(Register result,
2263 DoubleRegister double_input) {
2264 Label done;
2265
2266 TryInlineTruncateDoubleToI(result, double_input, &done);
2267
2268 // If we fell through, the inline version didn't succeed; call the stub.
2269 push(r14);
2270 // Put input on stack.
2271 lay(sp, MemOperand(sp, -kDoubleSize));
2272 StoreDouble(double_input, MemOperand(sp));
2273
2274 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2275 CallStub(&stub);
2276
2277 la(sp, MemOperand(sp, kDoubleSize));
2278 pop(r14);
2279
2280 bind(&done);
2281 }
2282
TruncateHeapNumberToI(Register result,Register object)2283 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2284 Label done;
2285 DoubleRegister double_scratch = kScratchDoubleReg;
2286 DCHECK(!result.is(object));
2287
2288 LoadDouble(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2289 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2290
2291 // If we fell through, the inline version didn't succeed; call the stub.
2292 push(r14);
2293 DoubleToIStub stub(isolate(), object, result,
2294 HeapNumber::kValueOffset - kHeapObjectTag, true, true);
2295 CallStub(&stub);
2296 pop(r14);
2297
2298 bind(&done);
2299 }
2300
TruncateNumberToI(Register object,Register result,Register heap_number_map,Register scratch1,Label * not_number)2301 void MacroAssembler::TruncateNumberToI(Register object, Register result,
2302 Register heap_number_map,
2303 Register scratch1, Label* not_number) {
2304 Label done;
2305 DCHECK(!result.is(object));
2306
2307 UntagAndJumpIfSmi(result, object, &done);
2308 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2309 TruncateHeapNumberToI(result, object);
2310
2311 bind(&done);
2312 }
2313
GetLeastBitsFromSmi(Register dst,Register src,int num_least_bits)2314 void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
2315 int num_least_bits) {
2316 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
2317 // We rotate by kSmiShift amount, and extract the num_least_bits
2318 risbg(dst, src, Operand(64 - num_least_bits), Operand(63),
2319 Operand(64 - kSmiShift), true);
2320 } else {
2321 SmiUntag(dst, src);
2322 AndP(dst, Operand((1 << num_least_bits) - 1));
2323 }
2324 }
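
// Sketch of what GetLeastBitsFromSmi computes (illustrative): untag the smi
// and keep the low num_least_bits; the risbg path does both in a single
// rotate-then-insert instruction.
//
//   uint32_t LeastBitsFromSmi(intptr_t smi, int num_least_bits) {
//     return static_cast<uint32_t>(smi >> kSmiShift) &
//            ((1u << num_least_bits) - 1);
//   }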
2325
GetLeastBitsFromInt32(Register dst,Register src,int num_least_bits)2326 void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
2327 int num_least_bits) {
2328 AndP(dst, src, Operand((1 << num_least_bits) - 1));
2329 }
2330
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles)2331 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
2332 SaveFPRegsMode save_doubles) {
2333 // All parameters are on the stack. r2 has the return value after call.
2334
2335 // If the expected number of arguments of the runtime function is
2336 // constant, we check that the actual number of arguments match the
2337 // expectation.
2338 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2339
2340 // TODO(1236192): Most runtime routines don't need the number of
2341 // arguments passed in because it is constant. At some point we
2342 // should remove this need and make the runtime routine entry code
2343 // smarter.
2344 mov(r2, Operand(num_arguments));
2345 mov(r3, Operand(ExternalReference(f, isolate())));
2346 CEntryStub stub(isolate(),
2347 #if V8_TARGET_ARCH_S390X
2348 f->result_size,
2349 #else
2350 1,
2351 #endif
2352 save_doubles);
2353 CallStub(&stub);
2354 }
2355
CallExternalReference(const ExternalReference & ext,int num_arguments)2356 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2357 int num_arguments) {
2358 mov(r2, Operand(num_arguments));
2359 mov(r3, Operand(ext));
2360
2361 CEntryStub stub(isolate(), 1);
2362 CallStub(&stub);
2363 }
2364
TailCallRuntime(Runtime::FunctionId fid)2365 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2366 const Runtime::Function* function = Runtime::FunctionForId(fid);
2367 DCHECK_EQ(1, function->result_size);
2368 if (function->nargs >= 0) {
2369 mov(r2, Operand(function->nargs));
2370 }
2371 JumpToExternalReference(ExternalReference(fid, isolate()));
2372 }
2373
JumpToExternalReference(const ExternalReference & builtin,bool builtin_exit_frame)2374 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2375 bool builtin_exit_frame) {
2376 mov(r3, Operand(builtin));
2377 CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
2378 builtin_exit_frame);
2379 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2380 }
2381
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2382 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2383 Register scratch1, Register scratch2) {
2384 if (FLAG_native_code_counters && counter->Enabled()) {
2385 mov(scratch1, Operand(value));
2386 mov(scratch2, Operand(ExternalReference(counter)));
2387 StoreW(scratch1, MemOperand(scratch2));
2388 }
2389 }
2390
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2391 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2392 Register scratch1, Register scratch2) {
2393 DCHECK(value > 0 && is_int8(value));
2394 if (FLAG_native_code_counters && counter->Enabled()) {
2395 mov(scratch1, Operand(ExternalReference(counter)));
2396 // @TODO(john.yan): can be optimized by asi()
2397 LoadW(scratch2, MemOperand(scratch1));
2398 AddP(scratch2, Operand(value));
2399 StoreW(scratch2, MemOperand(scratch1));
2400 }
2401 }
2402
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2403 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2404 Register scratch1, Register scratch2) {
2405 DCHECK(value > 0 && is_int8(value));
2406 if (FLAG_native_code_counters && counter->Enabled()) {
2407 mov(scratch1, Operand(ExternalReference(counter)));
2408 // @TODO(john.yan): can be optimized by asi()
2409 LoadW(scratch2, MemOperand(scratch1));
2410 AddP(scratch2, Operand(-value));
2411 StoreW(scratch2, MemOperand(scratch1));
2412 }
2413 }
2414
Assert(Condition cond,BailoutReason reason,CRegister cr)2415 void MacroAssembler::Assert(Condition cond, BailoutReason reason,
2416 CRegister cr) {
2417 if (emit_debug_code()) Check(cond, reason, cr);
2418 }
2419
AssertFastElements(Register elements)2420 void MacroAssembler::AssertFastElements(Register elements) {
2421 if (emit_debug_code()) {
2422 DCHECK(!elements.is(r0));
2423 Label ok;
2424 push(elements);
2425 LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2426 CompareRoot(elements, Heap::kFixedArrayMapRootIndex);
2427 beq(&ok, Label::kNear);
2428 CompareRoot(elements, Heap::kFixedDoubleArrayMapRootIndex);
2429 beq(&ok, Label::kNear);
2430 CompareRoot(elements, Heap::kFixedCOWArrayMapRootIndex);
2431 beq(&ok, Label::kNear);
2432 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2433 bind(&ok);
2434 pop(elements);
2435 }
2436 }
2437
Check(Condition cond,BailoutReason reason,CRegister cr)2438 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
2439 Label L;
2440 b(cond, &L);
2441 Abort(reason);
2442 // will not return here
2443 bind(&L);
2444 }
2445
Abort(BailoutReason reason)2446 void MacroAssembler::Abort(BailoutReason reason) {
2447 Label abort_start;
2448 bind(&abort_start);
2449 #ifdef DEBUG
2450 const char* msg = GetBailoutReason(reason);
2451 if (msg != NULL) {
2452 RecordComment("Abort message: ");
2453 RecordComment(msg);
2454 }
2455
2456 if (FLAG_trap_on_abort) {
2457 stop(msg);
2458 return;
2459 }
2460 #endif
2461
2462 // Check if Abort() has already been initialized.
2463 DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
2464
2465 LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
2466
2467 // Disable stub call restrictions to always allow calls to abort.
2468 if (!has_frame_) {
2469 // We don't actually want to generate a pile of code for this, so just
2470 // claim there is a stack frame, without generating one.
2471 FrameScope scope(this, StackFrame::NONE);
2472 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2473 } else {
2474 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2475 }
2476 // will not return here
2477 }
2478
LoadContext(Register dst,int context_chain_length)2479 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2480 if (context_chain_length > 0) {
2481 // Move up the chain of contexts to the context containing the slot.
2482 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2483 for (int i = 1; i < context_chain_length; i++) {
2484 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2485 }
2486 } else {
2487 // Slot is in the current function context. Move it into the
2488 // destination register in case we store into it (the write barrier
2489 // cannot be allowed to destroy the context in esi).
2490 LoadRR(dst, cp);
2491 }
2492 }
2493
LoadTransitionedArrayMapConditional(ElementsKind expected_kind,ElementsKind transitioned_kind,Register map_in_out,Register scratch,Label * no_map_match)2494 void MacroAssembler::LoadTransitionedArrayMapConditional(
2495 ElementsKind expected_kind, ElementsKind transitioned_kind,
2496 Register map_in_out, Register scratch, Label* no_map_match) {
2497 DCHECK(IsFastElementsKind(expected_kind));
2498 DCHECK(IsFastElementsKind(transitioned_kind));
2499
2500 // Check that the function's map is the same as the expected cached map.
2501 LoadP(scratch, NativeContextMemOperand());
2502 LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
2503 CmpP(map_in_out, ip);
2504 bne(no_map_match);
2505
2506 // Use the transitioned cached map.
2507 LoadP(map_in_out,
2508 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
2509 }
2510
LoadNativeContextSlot(int index,Register dst)2511 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2512 LoadP(dst, NativeContextMemOperand());
2513 LoadP(dst, ContextMemOperand(dst, index));
2514 }
2515
LoadGlobalFunctionInitialMap(Register function,Register map,Register scratch)2516 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2517 Register map,
2518 Register scratch) {
2519 // Load the initial map. The global functions all have initial maps.
2520 LoadP(map,
2521 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2522 if (emit_debug_code()) {
2523 Label ok, fail;
2524 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2525 b(&ok);
2526 bind(&fail);
2527 Abort(kGlobalFunctionsMustHaveInitialMap);
2528 bind(&ok);
2529 }
2530 }
2531
JumpIfNotPowerOfTwoOrZero(Register reg,Register scratch,Label * not_power_of_two_or_zero)2532 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2533 Register reg, Register scratch, Label* not_power_of_two_or_zero) {
2534 SubP(scratch, reg, Operand(1));
2535 CmpP(scratch, Operand::Zero());
2536 blt(not_power_of_two_or_zero);
2537 AndP(r0, reg, scratch /*, SetRC*/); // Should be okay to remove rc
2538 bne(not_power_of_two_or_zero /*, cr0*/);
2539 }
2540
JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,Register scratch,Label * zero_and_neg,Label * not_power_of_two)2541 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
2542 Register scratch,
2543 Label* zero_and_neg,
2544 Label* not_power_of_two) {
2545 SubP(scratch, reg, Operand(1));
2546 CmpP(scratch, Operand::Zero());
2547 blt(zero_and_neg);
2548 AndP(r0, reg, scratch /*, SetRC*/); // Should be okay to remove rc
2549 bne(not_power_of_two /*, cr0*/);
2550 }
2551
2552 #if !V8_TARGET_ARCH_S390X
SmiTagCheckOverflow(Register reg,Register overflow)2553 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
2554 DCHECK(!reg.is(overflow));
2555 LoadRR(overflow, reg); // Save original value.
2556 SmiTag(reg);
2557 XorP(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
2558 LoadAndTestRR(overflow, overflow);
2559 }
2560
SmiTagCheckOverflow(Register dst,Register src,Register overflow)2561 void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
2562 Register overflow) {
2563 if (dst.is(src)) {
2564 // Fall back to slower case.
2565 SmiTagCheckOverflow(dst, overflow);
2566 } else {
2567 DCHECK(!dst.is(src));
2568 DCHECK(!dst.is(overflow));
2569 DCHECK(!src.is(overflow));
2570 SmiTag(dst, src);
2571 XorP(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
2572 LoadAndTestRR(overflow, overflow);
2573 }
2574 }
2575 #endif
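
// Sketch of the 31-bit overflow test above (illustrative, 32-bit builds only):
// tagging shifts the value left by one, so the tag overflows exactly when the
// value and the tagged value differ in sign, i.e. (value ^ (value << 1)) < 0.
//
//   bool SmiTagOverflows(int32_t value) {
//     int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
//     return (value ^ tagged) < 0;  // XorP + LoadAndTestRR set the condition code
//   }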
2576
JumpIfNotBothSmi(Register reg1,Register reg2,Label * on_not_both_smi)2577 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2578 Label* on_not_both_smi) {
2579 STATIC_ASSERT(kSmiTag == 0);
2580 OrP(r0, reg1, reg2 /*, LeaveRC*/); // should be okay to remove LeaveRC
2581 JumpIfNotSmi(r0, on_not_both_smi);
2582 }
2583
UntagAndJumpIfSmi(Register dst,Register src,Label * smi_case)2584 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2585 Label* smi_case) {
2586 STATIC_ASSERT(kSmiTag == 0);
2587 STATIC_ASSERT(kSmiTagSize == 1);
2588 // this won't work if src == dst
2589 DCHECK(src.code() != dst.code());
2590 SmiUntag(dst, src);
2591 TestIfSmi(src);
2592 beq(smi_case);
2593 }
2594
UntagAndJumpIfNotSmi(Register dst,Register src,Label * non_smi_case)2595 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
2596 Label* non_smi_case) {
2597 STATIC_ASSERT(kSmiTag == 0);
2598 STATIC_ASSERT(kSmiTagSize == 1);
2599
2600 // We can use TestIfSmi directly when dst != src; otherwise the untag
2601 // operation clobbers the condition code, so we test the tag bit up front
2602 // with TestBit instead.
2603 if (src.code() != dst.code()) {
2604 SmiUntag(dst, src);
2605 TestIfSmi(src);
2606 } else {
2607 TestBit(src, 0, r0);
2608 SmiUntag(dst, src);
2609 LoadAndTestRR(r0, r0);
2610 }
2611 bne(non_smi_case);
2612 }
2613
JumpIfEitherSmi(Register reg1,Register reg2,Label * on_either_smi)2614 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2615 Label* on_either_smi) {
2616 STATIC_ASSERT(kSmiTag == 0);
2617 JumpIfSmi(reg1, on_either_smi);
2618 JumpIfSmi(reg2, on_either_smi);
2619 }
2620
AssertNotNumber(Register object)2621 void MacroAssembler::AssertNotNumber(Register object) {
2622 if (emit_debug_code()) {
2623 STATIC_ASSERT(kSmiTag == 0);
2624 TestIfSmi(object);
2625 Check(ne, kOperandIsANumber, cr0);
2626 push(object);
2627 CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
2628 pop(object);
2629 Check(ne, kOperandIsANumber);
2630 }
2631 }
2632
AssertNotSmi(Register object)2633 void MacroAssembler::AssertNotSmi(Register object) {
2634 if (emit_debug_code()) {
2635 STATIC_ASSERT(kSmiTag == 0);
2636 TestIfSmi(object);
2637 Check(ne, kOperandIsASmi, cr0);
2638 }
2639 }
2640
AssertSmi(Register object)2641 void MacroAssembler::AssertSmi(Register object) {
2642 if (emit_debug_code()) {
2643 STATIC_ASSERT(kSmiTag == 0);
2644 TestIfSmi(object);
2645 Check(eq, kOperandIsNotSmi, cr0);
2646 }
2647 }
2648
AssertString(Register object)2649 void MacroAssembler::AssertString(Register object) {
2650 if (emit_debug_code()) {
2651 STATIC_ASSERT(kSmiTag == 0);
2652 TestIfSmi(object);
2653 Check(ne, kOperandIsASmiAndNotAString, cr0);
2654 push(object);
2655 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2656 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2657 pop(object);
2658 Check(lt, kOperandIsNotAString);
2659 }
2660 }
2661
AssertName(Register object)2662 void MacroAssembler::AssertName(Register object) {
2663 if (emit_debug_code()) {
2664 STATIC_ASSERT(kSmiTag == 0);
2665 TestIfSmi(object);
2666 Check(ne, kOperandIsASmiAndNotAName, cr0);
2667 push(object);
2668 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2669 CompareInstanceType(object, object, LAST_NAME_TYPE);
2670 pop(object);
2671 Check(le, kOperandIsNotAName);
2672 }
2673 }
2674
AssertFunction(Register object)2675 void MacroAssembler::AssertFunction(Register object) {
2676 if (emit_debug_code()) {
2677 STATIC_ASSERT(kSmiTag == 0);
2678 TestIfSmi(object);
2679 Check(ne, kOperandIsASmiAndNotAFunction, cr0);
2680 push(object);
2681 CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
2682 pop(object);
2683 Check(eq, kOperandIsNotAFunction);
2684 }
2685 }
2686
AssertBoundFunction(Register object)2687 void MacroAssembler::AssertBoundFunction(Register object) {
2688 if (emit_debug_code()) {
2689 STATIC_ASSERT(kSmiTag == 0);
2690 TestIfSmi(object);
2691 Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
2692 push(object);
2693 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2694 pop(object);
2695 Check(eq, kOperandIsNotABoundFunction);
2696 }
2697 }
2698
AssertGeneratorObject(Register object)2699 void MacroAssembler::AssertGeneratorObject(Register object) {
2700 if (emit_debug_code()) {
2701 STATIC_ASSERT(kSmiTag == 0);
2702 TestIfSmi(object);
2703 Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
2704 push(object);
2705 CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
2706 pop(object);
2707 Check(eq, kOperandIsNotAGeneratorObject);
2708 }
2709 }
2710
AssertReceiver(Register object)2711 void MacroAssembler::AssertReceiver(Register object) {
2712 if (emit_debug_code()) {
2713 STATIC_ASSERT(kSmiTag == 0);
2714 TestIfSmi(object);
2715 Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
2716 push(object);
2717 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2718 CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
2719 pop(object);
2720 Check(ge, kOperandIsNotAReceiver);
2721 }
2722 }
2723
AssertUndefinedOrAllocationSite(Register object,Register scratch)2724 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2725 Register scratch) {
2726 if (emit_debug_code()) {
2727 Label done_checking;
2728 AssertNotSmi(object);
2729 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2730 beq(&done_checking, Label::kNear);
2731 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2732 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2733 Assert(eq, kExpectedUndefinedOrCell);
2734 bind(&done_checking);
2735 }
2736 }
2737
AssertIsRoot(Register reg,Heap::RootListIndex index)2738 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2739 if (emit_debug_code()) {
2740 CompareRoot(reg, index);
2741 Check(eq, kHeapNumberMapRegisterClobbered);
2742 }
2743 }
2744
JumpIfNotHeapNumber(Register object,Register heap_number_map,Register scratch,Label * on_not_heap_number)2745 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2746 Register heap_number_map,
2747 Register scratch,
2748 Label* on_not_heap_number) {
2749 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2750 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2751 CmpP(scratch, heap_number_map);
2752 bne(on_not_heap_number);
2753 }
2754
JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)2755 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
2756 Register first, Register second, Register scratch1, Register scratch2,
2757 Label* failure) {
2758 // Test that both first and second are sequential one-byte strings.
2759 // Assume that they are non-smis.
2760 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2761 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2762 LoadlB(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2763 LoadlB(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2764
2765 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
2766 scratch2, failure);
2767 }
2768
JumpIfNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)2769 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
2770 Register second,
2771 Register scratch1,
2772 Register scratch2,
2773 Label* failure) {
2774 // Check that neither is a smi.
2775 AndP(scratch1, first, second);
2776 JumpIfSmi(scratch1, failure);
2777 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
2778 scratch2, failure);
2779 }
2780
JumpIfNotUniqueNameInstanceType(Register reg,Label * not_unique_name)2781 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2782 Label* not_unique_name) {
2783 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2784 Label succeed;
2785 AndP(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2786 beq(&succeed, Label::kNear);
2787 CmpP(reg, Operand(SYMBOL_TYPE));
2788 bne(not_unique_name);
2789
2790 bind(&succeed);
2791 }
2792
2793 // Allocates a heap number or jumps to the gc_required label if the young
2794 // space is full and a scavenge is needed.
AllocateHeapNumber(Register result,Register scratch1,Register scratch2,Register heap_number_map,Label * gc_required,MutableMode mode)2795 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
2796 Register scratch2,
2797 Register heap_number_map,
2798 Label* gc_required,
2799 MutableMode mode) {
2800 // Allocate an object in the heap for the heap number and tag it as a heap
2801 // object.
2802 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
2803 NO_ALLOCATION_FLAGS);
2804
2805 Heap::RootListIndex map_index = mode == MUTABLE
2806 ? Heap::kMutableHeapNumberMapRootIndex
2807 : Heap::kHeapNumberMapRootIndex;
2808 AssertIsRoot(heap_number_map, map_index);
2809
2810 // Store heap number map in the allocated object.
2811 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
2812 }
2813
AllocateHeapNumberWithValue(Register result,DoubleRegister value,Register scratch1,Register scratch2,Register heap_number_map,Label * gc_required)2814 void MacroAssembler::AllocateHeapNumberWithValue(
2815 Register result, DoubleRegister value, Register scratch1, Register scratch2,
2816 Register heap_number_map, Label* gc_required) {
2817 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
2818 StoreDouble(value, FieldMemOperand(result, HeapNumber::kValueOffset));
2819 }
2820
AllocateJSValue(Register result,Register constructor,Register value,Register scratch1,Register scratch2,Label * gc_required)2821 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
2822 Register value, Register scratch1,
2823 Register scratch2, Label* gc_required) {
2824 DCHECK(!result.is(constructor));
2825 DCHECK(!result.is(scratch1));
2826 DCHECK(!result.is(scratch2));
2827 DCHECK(!result.is(value));
2828
2829 // Allocate JSValue in new space.
2830 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
2831 NO_ALLOCATION_FLAGS);
2832
2833 // Initialize the JSValue.
2834 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
2835 StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
2836 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
2837 StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
2838 StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
2839 StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
2840 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
2841 }
2842
InitializeNFieldsWithFiller(Register current_address,Register count,Register filler)2843 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
2844 Register count,
2845 Register filler) {
2846 Label loop;
2847 bind(&loop);
2848 StoreP(filler, MemOperand(current_address));
2849 AddP(current_address, current_address, Operand(kPointerSize));
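  // Note: callers (e.g. InitializeFieldsWithFiller below) pass the element
  // count in r1; BranchOnCount decrements r1 and loops while it is non-zero.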
2850 BranchOnCount(r1, &loop);
2851 }
2852
InitializeFieldsWithFiller(Register current_address,Register end_address,Register filler)2853 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2854 Register end_address,
2855 Register filler) {
2856 Label done;
2857 DCHECK(!filler.is(r1));
2858 DCHECK(!current_address.is(r1));
2859 DCHECK(!end_address.is(r1));
2860 SubP(r1, end_address, current_address /*, LeaveOE, SetRC*/);
2861 beq(&done, Label::kNear);
2862 ShiftRightP(r1, r1, Operand(kPointerSizeLog2));
2863 InitializeNFieldsWithFiller(current_address, r1, filler);
2864 bind(&done);
2865 }
2866
JumpIfBothInstanceTypesAreNotSequentialOneByte(Register first,Register second,Register scratch1,Register scratch2,Label * failure)2867 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2868 Register first, Register second, Register scratch1, Register scratch2,
2869 Label* failure) {
2870 const int kFlatOneByteStringMask =
2871 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2872 const int kFlatOneByteStringTag =
2873 kStringTag | kOneByteStringTag | kSeqStringTag;
2874 if (!scratch1.is(first)) LoadRR(scratch1, first);
2875 if (!scratch2.is(second)) LoadRR(scratch2, second);
2876 nilf(scratch1, Operand(kFlatOneByteStringMask));
2877 CmpP(scratch1, Operand(kFlatOneByteStringTag));
2878 bne(failure);
2879 nilf(scratch2, Operand(kFlatOneByteStringMask));
2880 CmpP(scratch2, Operand(kFlatOneByteStringTag));
2881 bne(failure);
2882 }
2883
JumpIfInstanceTypeIsNotSequentialOneByte(Register type,Register scratch,Label * failure)2884 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
2885 Register scratch,
2886 Label* failure) {
2887 const int kFlatOneByteStringMask =
2888 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2889 const int kFlatOneByteStringTag =
2890 kStringTag | kOneByteStringTag | kSeqStringTag;
2891
2892 if (!scratch.is(type)) LoadRR(scratch, type);
2893 nilf(scratch, Operand(kFlatOneByteStringMask));
2894 CmpP(scratch, Operand(kFlatOneByteStringTag));
2895 bne(failure);
2896 }
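
// Sketch of the instance-type test above (illustrative): a sequential one-byte
// string has the string bit clear, one-byte encoding and sequential
// representation, so masking must yield exactly the combined tag.
//
//   bool IsSequentialOneByteInstanceType(uint8_t type) {
//     const int kMask = kIsNotStringMask | kStringEncodingMask |
//                       kStringRepresentationMask;
//     const int kTag = kStringTag | kOneByteStringTag | kSeqStringTag;
//     return (type & kMask) == kTag;
//   }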
2897
2898 static const int kRegisterPassedArguments = 5;
2899
CalculateStackPassedWords(int num_reg_arguments,int num_double_arguments)2900 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
2901 int num_double_arguments) {
2902 int stack_passed_words = 0;
2903 if (num_double_arguments > DoubleRegister::kNumRegisters) {
2904 stack_passed_words +=
2905 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
2906 }
2907 // Up to five simple arguments are passed in registers r2..r6
2908 if (num_reg_arguments > kRegisterPassedArguments) {
2909 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2910 }
2911 return stack_passed_words;
2912 }
2913
EmitSeqStringSetCharCheck(Register string,Register index,Register value,uint32_t encoding_mask)2914 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
2915 Register value,
2916 uint32_t encoding_mask) {
2917 Label is_object;
2918 TestIfSmi(string);
2919 Check(ne, kNonObject, cr0);
2920
2921 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
2922 LoadlB(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
2923
2924 AndP(ip, Operand(kStringRepresentationMask | kStringEncodingMask));
2925 CmpP(ip, Operand(encoding_mask));
2926 Check(eq, kUnexpectedStringType);
2927
2928 // The index is assumed to be untagged coming in; tag it to compare with the
2929 // string length without using a temp register. It is restored at the end of
2930 // this function.
2931 #if !V8_TARGET_ARCH_S390X
2932 Label index_tag_ok, index_tag_bad;
2933 JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
2934 #endif
2935 SmiTag(index, index);
2936 #if !V8_TARGET_ARCH_S390X
2937 b(&index_tag_ok);
2938 bind(&index_tag_bad);
2939 Abort(kIndexIsTooLarge);
2940 bind(&index_tag_ok);
2941 #endif
2942
2943 LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
2944 CmpP(index, ip);
2945 Check(lt, kIndexIsTooLarge);
2946
2947 DCHECK(Smi::kZero == 0);
2948 CmpP(index, Operand::Zero());
2949 Check(ge, kIndexIsNegative);
2950
2951 SmiUntag(index, index);
2952 }
2953
2954 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2955 int num_double_arguments,
2956 Register scratch) {
2957 int frame_alignment = ActivationFrameAlignment();
2958 int stack_passed_arguments =
2959 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2960 int stack_space = kNumRequiredStackFrameSlots;
2961 if (frame_alignment > kPointerSize) {
2962 // Make stack end at alignment and make room for stack arguments
2963 // -- preserving original value of sp.
2964 LoadRR(scratch, sp);
2965 lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
2966 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2967 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
2968 StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
2969 } else {
2970 stack_space += stack_passed_arguments;
2971 }
2972 lay(sp, MemOperand(sp, -(stack_space)*kPointerSize));
2973 }
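// Sketch of a typical C-call sequence built on these helpers (illustrative
// only; func_ref is a hypothetical ExternalReference):
//   PrepareCallCFunction(2, 0, r1);
//   // ...place the two integer arguments in r2 and r3...
//   CallCFunction(func_ref, 2);
// PrepareCallCFunction reserves the required stack slots (realigning sp if
// needed); CallCFunction restores sp after the call returns.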
2974
2975 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2976 Register scratch) {
2977 PrepareCallCFunction(num_reg_arguments, 0, scratch);
2978 }
2979
2980 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
2981
2982 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
2983
2984 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
2985 DoubleRegister src2) {
2986 if (src2.is(d0)) {
2987 DCHECK(!src1.is(d2));
2988 Move(d2, src2);
2989 Move(d0, src1);
2990 } else {
2991 Move(d0, src1);
2992 Move(d2, src2);
2993 }
2994 }
2995
2996 void MacroAssembler::CallCFunction(ExternalReference function,
2997 int num_reg_arguments,
2998 int num_double_arguments) {
2999 mov(ip, Operand(function));
3000 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3001 }
3002
3003 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
3004 int num_double_arguments) {
3005 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3006 }
3007
3008 void MacroAssembler::CallCFunction(ExternalReference function,
3009 int num_arguments) {
3010 CallCFunction(function, num_arguments, 0);
3011 }
3012
3013 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
3014 CallCFunction(function, num_arguments, 0);
3015 }
3016
3017 void MacroAssembler::CallCFunctionHelper(Register function,
3018 int num_reg_arguments,
3019 int num_double_arguments) {
3020 DCHECK(has_frame());
3021
3022 // Just call directly. The function called cannot cause a GC, or
3023 // allow preemption, so the return address in the return-address
3024 // register (r14) stays correct.
3025 Register dest = function;
3026 if (ABI_CALL_VIA_IP) {
3027 Move(ip, function);
3028 dest = ip;
3029 }
3030
3031 Call(dest);
3032
3033 int stack_passed_arguments =
3034 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3035 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
3036 if (ActivationFrameAlignment() > kPointerSize) {
3037 // Load the original stack pointer (pre-alignment) from the stack
3038 LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
3039 } else {
3040 la(sp, MemOperand(sp, stack_space * kPointerSize));
3041 }
3042 }
3043
3044 void MacroAssembler::CheckPageFlag(
3045 Register object,
3046 Register scratch, // scratch may be same register as object
3047 int mask, Condition cc, Label* condition_met) {
3048 DCHECK(cc == ne || cc == eq);
3049 ClearRightImm(scratch, object, Operand(kPageSizeBits));
3050
3051 if (base::bits::IsPowerOfTwo32(mask)) {
3052 // If it's a power of two, we can use Test-Under-Mask Memory-Imm form
3053 // which allows testing of a single byte in memory.
3054 int32_t byte_offset = 4;
3055 uint32_t shifted_mask = mask;
3056 // Determine the byte offset to be tested
3057 if (mask <= 0x80) {
3058 byte_offset = kPointerSize - 1;
3059 } else if (mask < 0x8000) {
3060 byte_offset = kPointerSize - 2;
3061 shifted_mask = mask >> 8;
3062 } else if (mask < 0x800000) {
3063 byte_offset = kPointerSize - 3;
3064 shifted_mask = mask >> 16;
3065 } else {
3066 byte_offset = kPointerSize - 4;
3067 shifted_mask = mask >> 24;
3068 }
3069 #if V8_TARGET_LITTLE_ENDIAN
3070 // Reverse the byte_offset when emulating on a little-endian platform.
3071 byte_offset = kPointerSize - byte_offset - 1;
3072 #endif
3073 tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
3074 Operand(shifted_mask));
3075 } else {
3076 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3077 AndP(r0, scratch, Operand(mask));
3078 }
3079 // Should be okay to remove rc
3080
3081 if (cc == ne) {
3082 bne(condition_met);
3083 }
3084 if (cc == eq) {
3085 beq(condition_met);
3086 }
3087 }
3088
3089 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3090 Register scratch1, Label* on_black) {
3091 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
3092 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3093 }
3094
3095 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3096 Register mask_scratch, Label* has_color,
3097 int first_bit, int second_bit) {
3098 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3099
3100 GetMarkBits(object, bitmap_scratch, mask_scratch);
3101
3102 Label other_color, word_boundary;
3103 LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3104 // Test the first bit
3105 AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc
3106 b(first_bit == 1 ? eq : ne, &other_color, Label::kNear);
3107 // Shift left 1
3108 // May need to load the next cell
3109 sll(mask_scratch, Operand(1) /*, SetRC*/);
3110 LoadAndTest32(mask_scratch, mask_scratch);
3111 beq(&word_boundary, Label::kNear);
3112 // Test the second bit
3113 AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc
3114 b(second_bit == 1 ? ne : eq, has_color);
3115 b(&other_color, Label::kNear);
3116
3117 bind(&word_boundary);
3118 LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
3119 AndP(r0, ip, Operand(1));
3120 b(second_bit == 1 ? ne : eq, has_color);
3121 bind(&other_color);
3122 }
3123
3124 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
3125 Register mask_reg) {
3126 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3127 LoadRR(bitmap_reg, addr_reg);
3128 nilf(bitmap_reg, Operand(~Page::kPageAlignmentMask));
3129 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3130 ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
3131 ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
3132 ShiftLeftP(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
3133 AddP(bitmap_reg, ip);
3134 LoadRR(ip, mask_reg); // Have to do some funky reg shuffling as
3135 // 31-bit shift left clobbers on s390.
3136 LoadImmP(mask_reg, Operand(1));
3137 ShiftLeftP(mask_reg, mask_reg, ip);
3138 }
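// Rough sketch of the computation above (illustrative; the constants are the
// usual V8 values and are assumptions here): with kPointerSizeLog2 == 3 and
// Bitmap::kBitsPerCellLog2 == 5, bits [7:3] of the address select the bit
// within a bitmap cell (mask_reg = 1 << those bits), while the higher
// page-offset bits, scaled by kBytesPerCellLog2, give the byte offset of the
// bitmap cell that bitmap_reg ends up addressing.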
3139
3140 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
3141 Register mask_scratch, Register load_scratch,
3142 Label* value_is_white) {
3143 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3144 GetMarkBits(value, bitmap_scratch, mask_scratch);
3145
3146 // If the value is black or grey we don't need to do anything.
3147 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3148 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3149 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
3150 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3151
3152 // Since both black and grey have a 1 in the first position and white does
3153 // not have a 1 there we only need to check one bit.
3154 LoadlW(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3155 LoadRR(r0, load_scratch);
3156 AndP(r0, mask_scratch);
3157 beq(value_is_white);
3158 }
3159
3160 // Saturate a value into 8-bit unsigned integer
3161 // if input_value < 0, output_value is 0
3162 // if input_value > 255, output_value is 255
3163 // otherwise output_value is the input_value
3164 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3165 int satval = (1 << 8) - 1;
3166
3167 Label done, negative_label, overflow_label;
3168 CmpP(input_reg, Operand::Zero());
3169 blt(&negative_label);
3170
3171 CmpP(input_reg, Operand(satval));
3172 bgt(&overflow_label);
3173 if (!output_reg.is(input_reg)) {
3174 LoadRR(output_reg, input_reg);
3175 }
3176 b(&done);
3177
3178 bind(&negative_label);
3179 LoadImmP(output_reg, Operand::Zero()); // set to 0 if negative
3180 b(&done);
3181
3182 bind(&overflow_label); // set to satval if > satval
3183 LoadImmP(output_reg, Operand(satval));
3184
3185 bind(&done);
3186 }
3187
3188 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3189 DoubleRegister input_reg,
3190 DoubleRegister double_scratch) {
3191 Label above_zero;
3192 Label done;
3193 Label in_bounds;
3194
3195 LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3196 cdbr(input_reg, double_scratch);
3197 bgt(&above_zero, Label::kNear);
3198
3199 // Double value is <= 0 or NaN; return 0.
3200 LoadIntLiteral(result_reg, 0);
3201 b(&done, Label::kNear);
3202
3203 // Double value is positive; return 255 if it exceeds 255.
3204 bind(&above_zero);
3205 LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3206 cdbr(input_reg, double_scratch);
3207 ble(&in_bounds, Label::kNear);
3208 LoadIntLiteral(result_reg, 255);
3209 b(&done, Label::kNear);
3210
3211 // In 0-255 range, round and truncate.
3212 bind(&in_bounds);
3213
3214 // round to nearest (default rounding mode)
3215 cfdbr(ROUND_TO_NEAREST_WITH_TIES_TO_EVEN, result_reg, input_reg);
3216 bind(&done);
3217 }
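// Illustrative behaviour: because the in-range conversion uses
// ROUND_TO_NEAREST_WITH_TIES_TO_EVEN, 253.5 and 254.5 both clamp to 254,
// while NaN and anything <= 0.0 produce 0 and anything above 255.0
// produces 255.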
3218
3219 void MacroAssembler::LoadInstanceDescriptors(Register map,
3220 Register descriptors) {
3221 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3222 }
3223
3224 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3225 LoadlW(dst, FieldMemOperand(map, Map::kBitField3Offset));
3226 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3227 }
3228
3229 void MacroAssembler::EnumLength(Register dst, Register map) {
3230 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3231 LoadW(dst, FieldMemOperand(map, Map::kBitField3Offset));
3232 And(dst, Operand(Map::EnumLengthBits::kMask));
3233 SmiTag(dst);
3234 }
3235
3236 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3237 int accessor_index,
3238 AccessorComponent accessor) {
3239 LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3240 LoadInstanceDescriptors(dst, dst);
3241 LoadP(dst,
3242 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3243 const int getterOffset = AccessorPair::kGetterOffset;
3244 const int setterOffset = AccessorPair::kSetterOffset;
3245 int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
3246 LoadP(dst, FieldMemOperand(dst, offset));
3247 }
3248
3249 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3250 Register null_value = r7;
3251 Register empty_fixed_array_value = r8;
3252 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3253 Label next, start;
3254 LoadRR(r4, r2);
3255
3256 // Check if the enum length field is properly initialized, indicating that
3257 // there is an enum cache.
3258 LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
3259
3260 EnumLength(r5, r3);
3261 CmpSmiLiteral(r5, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3262 beq(call_runtime);
3263
3264 LoadRoot(null_value, Heap::kNullValueRootIndex);
3265 b(&start, Label::kNear);
3266
3267 bind(&next);
3268 LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
3269
3270 // For all objects but the receiver, check that the cache is empty.
3271 EnumLength(r5, r3);
3272 CmpSmiLiteral(r5, Smi::kZero, r0);
3273 bne(call_runtime);
3274
3275 bind(&start);
3276
3277 // Check that there are no elements. Register r4 contains the current JS
3278 // object we've reached through the prototype chain.
3279 Label no_elements;
3280 LoadP(r4, FieldMemOperand(r4, JSObject::kElementsOffset));
3281 CmpP(r4, empty_fixed_array_value);
3282 beq(&no_elements, Label::kNear);
3283
3284 // Second chance, the object may be using the empty slow element dictionary.
3285 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
3286 bne(call_runtime);
3287
3288 bind(&no_elements);
3289 LoadP(r4, FieldMemOperand(r3, Map::kPrototypeOffset));
3290 CmpP(r4, null_value);
3291 bne(&next);
3292 }
3293
3294 ////////////////////////////////////////////////////////////////////////////////
3295 //
3296 // New MacroAssembler Interfaces added for S390
3297 //
3298 ////////////////////////////////////////////////////////////////////////////////
3299 // Primarily used for loading constants
3300 // This should really move to be in macro-assembler as it
3301 // is really a pseudo instruction
3302 // Some usages of this intend for a FIXED_SEQUENCE to be used
3303 // @TODO - break this dependency so we can optimize mov() in general
3304 // and only use the generic version when we require a fixed sequence
3305 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
3306 Representation r, Register scratch) {
3307 DCHECK(!r.IsDouble());
3308 if (r.IsInteger8()) {
3309 LoadB(dst, mem);
3310 lgbr(dst, dst);
3311 } else if (r.IsUInteger8()) {
3312 LoadlB(dst, mem);
3313 } else if (r.IsInteger16()) {
3314 LoadHalfWordP(dst, mem, scratch);
3315 lghr(dst, dst);
3316 } else if (r.IsUInteger16()) {
3317 LoadHalfWordP(dst, mem, scratch);
3318 #if V8_TARGET_ARCH_S390X
3319 } else if (r.IsInteger32()) {
3320 LoadW(dst, mem, scratch);
3321 #endif
3322 } else {
3323 LoadP(dst, mem, scratch);
3324 }
3325 }
3326
3327 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
3328 Representation r, Register scratch) {
3329 DCHECK(!r.IsDouble());
3330 if (r.IsInteger8() || r.IsUInteger8()) {
3331 StoreByte(src, mem, scratch);
3332 } else if (r.IsInteger16() || r.IsUInteger16()) {
3333 StoreHalfWord(src, mem, scratch);
3334 #if V8_TARGET_ARCH_S390X
3335 } else if (r.IsInteger32()) {
3336 StoreW(src, mem, scratch);
3337 #endif
3338 } else {
3339 if (r.IsHeapObject()) {
3340 AssertNotSmi(src);
3341 } else if (r.IsSmi()) {
3342 AssertSmi(src);
3343 }
3344 StoreP(src, mem, scratch);
3345 }
3346 }
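// Illustrative use (a sketch, not from the original source; obj and offset
// are hypothetical): loading a signed 8-bit field sign-extends it into dst:
//   LoadRepresentation(r3, FieldMemOperand(obj, offset),
//                      Representation::Integer8(), r0);
// which takes the LoadB + lgbr path above.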
3347
3348 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
3349 Register scratch_reg,
3350 Register scratch2_reg,
3351 Label* no_memento_found) {
3352 Label map_check;
3353 Label top_check;
3354 ExternalReference new_space_allocation_top_adr =
3355 ExternalReference::new_space_allocation_top_address(isolate());
3356 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
3357 const int kMementoLastWordOffset =
3358 kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
3359
3360 DCHECK(!AreAliased(receiver_reg, scratch_reg));
3361
3362 // Bail out if the object is not in new space.
3363 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
3364
3365 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
3366
3367 // If the object is in new space, we need to check whether it is on the same
3368 // page as the current top.
3369 AddP(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
3370 mov(ip, Operand(new_space_allocation_top_adr));
3371 LoadP(ip, MemOperand(ip));
3372 XorP(r0, scratch_reg, ip);
3373 AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
3374 beq(&top_check, Label::kNear);
3375 // The object is on a different page than allocation top. Bail out if the
3376 // object sits on the page boundary as no memento can follow and we cannot
3377 // touch the memory following it.
3378 XorP(r0, scratch_reg, receiver_reg);
3379 AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
3380 bne(no_memento_found);
3381 // Continue with the actual map check.
3382 b(&map_check, Label::kNear);
3383 // If top is on the same page as the current object, we need to check whether
3384 // we are below top.
3385 bind(&top_check);
3386 CmpP(scratch_reg, ip);
3387 bge(no_memento_found);
3388 // Memento map check.
3389 bind(&map_check);
3390 LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
3391 CmpP(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
3392 }
3393
3394 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
3395 Register reg4, Register reg5,
3396 Register reg6) {
3397 RegList regs = 0;
3398 if (reg1.is_valid()) regs |= reg1.bit();
3399 if (reg2.is_valid()) regs |= reg2.bit();
3400 if (reg3.is_valid()) regs |= reg3.bit();
3401 if (reg4.is_valid()) regs |= reg4.bit();
3402 if (reg5.is_valid()) regs |= reg5.bit();
3403 if (reg6.is_valid()) regs |= reg6.bit();
3404
3405 const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
3406 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
3407 int code = config->GetAllocatableGeneralCode(i);
3408 Register candidate = Register::from_code(code);
3409 if (regs & candidate.bit()) continue;
3410 return candidate;
3411 }
3412 UNREACHABLE();
3413 return no_reg;
3414 }
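// Illustrative use (assumption, not from the original source): when a
// temporary must not alias a few live registers, e.g.
//   Register temp = GetRegisterThatIsNotOneOf(receiver_reg, scratch_reg);
// the helper returns the first allocatable general register whose bit is not
// set in the exclusion list.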
3415
3416 void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
3417 Register scratch0,
3418 Register scratch1,
3419 Label* found) {
3420 DCHECK(!scratch1.is(scratch0));
3421 Register current = scratch0;
3422 Label loop_again, end;
3423
3424 // scratch contained elements pointer.
3425 LoadRR(current, object);
3426 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
3427 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
3428 CompareRoot(current, Heap::kNullValueRootIndex);
3429 beq(&end);
3430
3431 // Loop based on the map going up the prototype chain.
3432 bind(&loop_again);
3433 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
3434
3435 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
3436 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
3437 LoadlB(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
3438 CmpP(scratch1, Operand(JS_OBJECT_TYPE));
3439 blt(found);
3440
3441 LoadlB(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
3442 DecodeField<Map::ElementsKindBits>(scratch1);
3443 CmpP(scratch1, Operand(DICTIONARY_ELEMENTS));
3444 beq(found);
3445 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
3446 CompareRoot(current, Heap::kNullValueRootIndex);
3447 bne(&loop_again);
3448
3449 bind(&end);
3450 }
3451
3452 void MacroAssembler::mov(Register dst, const Operand& src) {
3453 if (src.rmode_ != kRelocInfo_NONEPTR) {
3454 // some form of relocation needed
3455 RecordRelocInfo(src.rmode_, src.imm_);
3456 }
3457
3458 #if V8_TARGET_ARCH_S390X
3459 int64_t value = src.immediate();
3460 int32_t hi_32 = static_cast<int64_t>(value) >> 32;
3461 int32_t lo_32 = static_cast<int32_t>(value);
3462
3463 iihf(dst, Operand(hi_32));
3464 iilf(dst, Operand(lo_32));
3465 #else
3466 int value = src.immediate();
3467 iilf(dst, Operand(value));
3468 #endif
3469 }
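// Illustrative example: on 64-bit targets mov() always emits the fixed
// IIHF/IILF pair, so mov(r3, Operand(0x123456789ABCDEF0)) assembles to
// iihf r3,0x12345678 followed by iilf r3,0x9ABCDEF0, a predictable 12-byte
// sequence that the FIXED_SEQUENCE call sites rely on.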
3470
3471 void MacroAssembler::Mul32(Register dst, const MemOperand& src1) {
3472 if (is_uint12(src1.offset())) {
3473 ms(dst, src1);
3474 } else if (is_int20(src1.offset())) {
3475 msy(dst, src1);
3476 } else {
3477 UNIMPLEMENTED();
3478 }
3479 }
3480
3481 void MacroAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
3482
3483 void MacroAssembler::Mul32(Register dst, const Operand& src1) {
3484 msfi(dst, src1);
3485 }
3486
3487 void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
3488 if (is_int20(src1.offset())) {
3489 msg(dst, src1);
3490 } else {
3491 UNIMPLEMENTED();
3492 }
3493 }
3494
3495 void MacroAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
3496
3497 void MacroAssembler::Mul64(Register dst, const Operand& src1) {
3498 msgfi(dst, src1);
3499 }
3500
3501 void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
3502 if (dst.is(src2)) {
3503 MulP(dst, src1);
3504 } else if (dst.is(src1)) {
3505 MulP(dst, src2);
3506 } else {
3507 Move(dst, src1);
3508 MulP(dst, src2);
3509 }
3510 }
3511
3512 void MacroAssembler::DivP(Register dividend, Register divider) {
3513 // have to make sure the src and dst are reg pairs
3514 DCHECK(dividend.code() % 2 == 0);
3515 #if V8_TARGET_ARCH_S390X
3516 dsgr(dividend, divider);
3517 #else
3518 dr(dividend, divider);
3519 #endif
3520 }
3521
3522 void MacroAssembler::MulP(Register dst, const Operand& opnd) {
3523 #if V8_TARGET_ARCH_S390X
3524 msgfi(dst, opnd);
3525 #else
3526 msfi(dst, opnd);
3527 #endif
3528 }
3529
3530 void MacroAssembler::MulP(Register dst, Register src) {
3531 #if V8_TARGET_ARCH_S390X
3532 msgr(dst, src);
3533 #else
3534 msr(dst, src);
3535 #endif
3536 }
3537
3538 void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
3539 #if V8_TARGET_ARCH_S390X
3540 if (is_int20(opnd.offset())) {
3541 msg(dst, opnd);
3542 } else {
3543 UNIMPLEMENTED();
3544 }
3545 #else
3546 if (is_uint12(opnd.offset())) {
3547 ms(dst, opnd);
3548 } else if (is_int20(opnd.offset())) {
3549 msy(dst, opnd);
3550 } else {
3551 UNIMPLEMENTED();
3552 }
3553 #endif
3554 }
3555
3556 //----------------------------------------------------------------------------
3557 // Add Instructions
3558 //----------------------------------------------------------------------------
3559
3560 // Add 32-bit (Register dst = Register dst + Immediate opnd)
3561 void MacroAssembler::Add32(Register dst, const Operand& opnd) {
3562 if (is_int16(opnd.immediate()))
3563 ahi(dst, opnd);
3564 else
3565 afi(dst, opnd);
3566 }
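// Illustrative encoding choice: Add32(r2, Operand(1)) fits in 16 bits and
// assembles to ahi, while Add32(r2, Operand(100000)) falls back to afi.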
3567
3568 // Add Pointer Size (Register dst = Register dst + Immediate opnd)
3569 void MacroAssembler::AddP(Register dst, const Operand& opnd) {
3570 #if V8_TARGET_ARCH_S390X
3571 if (is_int16(opnd.immediate()))
3572 aghi(dst, opnd);
3573 else
3574 agfi(dst, opnd);
3575 #else
3576 Add32(dst, opnd);
3577 #endif
3578 }
3579
3580 // Add 32-bit (Register dst = Register src + Immediate opnd)
3581 void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
3582 if (!dst.is(src)) {
3583 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3584 ahik(dst, src, opnd);
3585 return;
3586 }
3587 lr(dst, src);
3588 }
3589 Add32(dst, opnd);
3590 }
3591
3592 // Add Pointer Size (Register dst = Register src + Immediate opnd)
3593 void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
3594 if (!dst.is(src)) {
3595 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3596 AddPImm_RRI(dst, src, opnd);
3597 return;
3598 }
3599 LoadRR(dst, src);
3600 }
3601 AddP(dst, opnd);
3602 }
3603
3604 // Add 32-bit (Register dst = Register dst + Register src)
3605 void MacroAssembler::Add32(Register dst, Register src) { ar(dst, src); }
3606
3607 // Add Pointer Size (Register dst = Register dst + Register src)
3608 void MacroAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
3609
3610 // Add Pointer Size with src extension
3611 // (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
3612 // src is treated as a 32-bit signed integer, which is sign extended to
3613 // 64-bit if necessary.
3614 void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
3615 #if V8_TARGET_ARCH_S390X
3616 agfr(dst, src);
3617 #else
3618 ar(dst, src);
3619 #endif
3620 }
3621
3622 // Add 32-bit (Register dst = Register src1 + Register src2)
3623 void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
3624 if (!dst.is(src1) && !dst.is(src2)) {
3625 // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
3626 // as AR is a smaller instruction
3627 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3628 ark(dst, src1, src2);
3629 return;
3630 } else {
3631 lr(dst, src1);
3632 }
3633 } else if (dst.is(src2)) {
3634 src2 = src1;
3635 }
3636 ar(dst, src2);
3637 }
3638
3639 // Add Pointer Size (Register dst = Register src1 + Register src2)
3640 void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
3641 if (!dst.is(src1) && !dst.is(src2)) {
3642 // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
3643 // as AR is a smaller instruction
3644 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3645 AddP_RRR(dst, src1, src2);
3646 return;
3647 } else {
3648 LoadRR(dst, src1);
3649 }
3650 } else if (dst.is(src2)) {
3651 src2 = src1;
3652 }
3653 AddRR(dst, src2);
3654 }
3655
3656 // Add Pointer Size with src extension
3657 // (Register dst (ptr) = Register dst (ptr) + Register src1 (ptr) +
3658 // Register src2 (32 | 32->64))
3659 // src is treated as a 32-bit signed integer, which is sign extended to
3660 // 64-bit if necessary.
3661 void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
3662 Register src2) {
3663 #if V8_TARGET_ARCH_S390X
3664 if (dst.is(src2)) {
3665 // The source we need to sign extend is the same as result.
3666 lgfr(dst, src2);
3667 agr(dst, src1);
3668 } else {
3669 if (!dst.is(src1)) LoadRR(dst, src1);
3670 agfr(dst, src2);
3671 }
3672 #else
3673 AddP(dst, src1, src2);
3674 #endif
3675 }
3676
3677 // Add 32-bit (Register-Memory)
3678 void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
3679 DCHECK(is_int20(opnd.offset()));
3680 if (is_uint12(opnd.offset()))
3681 a(dst, opnd);
3682 else
3683 ay(dst, opnd);
3684 }
3685
3686 // Add Pointer Size (Register-Memory)
3687 void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
3688 #if V8_TARGET_ARCH_S390X
3689 DCHECK(is_int20(opnd.offset()));
3690 ag(dst, opnd);
3691 #else
3692 Add32(dst, opnd);
3693 #endif
3694 }
3695
3696 // Add Pointer Size with src extension
3697 // (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
3698 // src is treated as a 32-bit signed integer, which is sign extended to
3699 // 64-bit if necessary.
3700 void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
3701 #if V8_TARGET_ARCH_S390X
3702 DCHECK(is_int20(opnd.offset()));
3703 agf(dst, opnd);
3704 #else
3705 Add32(dst, opnd);
3706 #endif
3707 }
3708
3709 // Add 32-bit (Memory - Immediate)
3710 void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
3711 DCHECK(is_int8(imm.immediate()));
3712 DCHECK(is_int20(opnd.offset()));
3713 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3714 asi(opnd, imm);
3715 }
3716
3717 // Add Pointer-sized (Memory - Immediate)
3718 void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
3719 DCHECK(is_int8(imm.immediate()));
3720 DCHECK(is_int20(opnd.offset()));
3721 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3722 #if V8_TARGET_ARCH_S390X
3723 agsi(opnd, imm);
3724 #else
3725 asi(opnd, imm);
3726 #endif
3727 }
3728
3729 //----------------------------------------------------------------------------
3730 // Add Logical Instructions
3731 //----------------------------------------------------------------------------
3732
3733 // Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
3734 void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
3735 Register src2) {
3736 if (!dst.is(src2) && !dst.is(src1)) {
3737 lr(dst, src1);
3738 alcr(dst, src2);
3739 } else if (!dst.is(src2)) {
3740 // dst == src1
3741 DCHECK(dst.is(src1));
3742 alcr(dst, src2);
3743 } else {
3744 // dst == src2
3745 DCHECK(dst.is(src2));
3746 alcr(dst, src1);
3747 }
3748 }
3749
3750 // Add Logical 32-bit (Register dst = Register src1 + Register src2)
3751 void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
3752 if (!dst.is(src2) && !dst.is(src1)) {
3753 lr(dst, src1);
3754 alr(dst, src2);
3755 } else if (!dst.is(src2)) {
3756 // dst == src1
3757 DCHECK(dst.is(src1));
3758 alr(dst, src2);
3759 } else {
3760 // dst == src2
3761 DCHECK(dst.is(src2));
3762 alr(dst, src1);
3763 }
3764 }
3765
3766 // Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
3767 void MacroAssembler::AddLogical(Register dst, const Operand& imm) {
3768 alfi(dst, imm);
3769 }
3770
3771 // Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
3772 void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
3773 #ifdef V8_TARGET_ARCH_S390X
3774 algfi(dst, imm);
3775 #else
3776 AddLogical(dst, imm);
3777 #endif
3778 }
3779
3780 // Add Logical 32-bit (Register-Memory)
3781 void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
3782 DCHECK(is_int20(opnd.offset()));
3783 if (is_uint12(opnd.offset()))
3784 al_z(dst, opnd);
3785 else
3786 aly(dst, opnd);
3787 }
3788
3789 // Add Logical Pointer Size (Register-Memory)
3790 void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
3791 #if V8_TARGET_ARCH_S390X
3792 DCHECK(is_int20(opnd.offset()));
3793 alg(dst, opnd);
3794 #else
3795 AddLogical(dst, opnd);
3796 #endif
3797 }
3798
3799 //----------------------------------------------------------------------------
3800 // Subtract Instructions
3801 //----------------------------------------------------------------------------
3802
3803 // Subtract Logical With Borrow 32-bit (Register dst = Register src1 - Register
3804 // src2)
3805 void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
3806 Register src2) {
3807 if (!dst.is(src2) && !dst.is(src1)) {
3808 lr(dst, src1);
3809 slbr(dst, src2);
3810 } else if (!dst.is(src2)) {
3811 // dst == src1
3812 DCHECK(dst.is(src1));
3813 slbr(dst, src2);
3814 } else {
3815 // dst == src2
3816 DCHECK(dst.is(src2));
3817 lr(r0, dst);
3818 SubLogicalWithBorrow32(dst, src1, r0);
3819 }
3820 }
3821
3822 // Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
3823 void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
3824 if (!dst.is(src2) && !dst.is(src1)) {
3825 lr(dst, src1);
3826 slr(dst, src2);
3827 } else if (!dst.is(src2)) {
3828 // dst == src1
3829 DCHECK(dst.is(src1));
3830 slr(dst, src2);
3831 } else {
3832 // dst == src2
3833 DCHECK(dst.is(src2));
3834 lr(r0, dst);
3835 SubLogical32(dst, src1, r0);
3836 }
3837 }
3838
3839 // Subtract 32-bit (Register dst = Register dst - Immediate opnd)
3840 void MacroAssembler::Sub32(Register dst, const Operand& imm) {
3841 Add32(dst, Operand(-(imm.imm_)));
3842 }
3843
3844 // Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
3845 void MacroAssembler::SubP(Register dst, const Operand& imm) {
3846 AddP(dst, Operand(-(imm.imm_)));
3847 }
3848
3849 // Subtract 32-bit (Register dst = Register src - Immediate opnd)
3850 void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
3851 Add32(dst, src, Operand(-(imm.imm_)));
3852 }
3853
3854 // Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
3855 void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
3856 AddP(dst, src, Operand(-(imm.imm_)));
3857 }
3858
3859 // Subtract 32-bit (Register dst = Register dst - Register src)
3860 void MacroAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
3861
3862 // Subtract Pointer Size (Register dst = Register dst - Register src)
3863 void MacroAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
3864
3865 // Subtract Pointer Size with src extension
3866 // (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
3867 // src is treated as a 32-bit signed integer, which is sign extended to
3868 // 64-bit if necessary.
3869 void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
3870 #if V8_TARGET_ARCH_S390X
3871 sgfr(dst, src);
3872 #else
3873 sr(dst, src);
3874 #endif
3875 }
3876
3877 // Subtract 32-bit (Register = Register - Register)
3878 void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
3879 // Use non-clobbering version if possible
3880 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3881 srk(dst, src1, src2);
3882 return;
3883 }
3884 if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
3885 // In the dst = src - dst case, negate dst and then add src.
3886 if (!dst.is(src1) && dst.is(src2)) {
3887 Label done;
3888 lcr(dst, dst); // dst = -dst
3889 b(overflow, &done);
3890 ar(dst, src1); // dst = dst + src
3891 bind(&done);
3892 } else {
3893 sr(dst, src2);
3894 }
3895 }
3896
3897 // Subtract Pointer Sized (Register = Register - Register)
3898 void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
3899 // Use non-clobbering version if possible
3900 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3901 SubP_RRR(dst, src1, src2);
3902 return;
3903 }
3904 if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
3905 // In the dst = src - dst case, negate dst and then add src.
3906 if (!dst.is(src1) && dst.is(src2)) {
3907 Label done;
3908 LoadComplementRR(dst, dst); // dst = -dst
3909 b(overflow, &done);
3910 AddP(dst, src1); // dst = dst + src
3911 bind(&done);
3912 } else {
3913 SubP(dst, src2);
3914 }
3915 }
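// Worked example for the dst == src2 case (illustrative): SubP(r3, r4, r3)
// cannot subtract in place, so without DISTINCT_OPS it negates r3 and then
// adds r4, yielding r3 = r4 - r3 (the add is skipped only if the negation
// overflowed, i.e. the original r3 was the most negative value).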
3916
3917 // Subtract Pointer Size with src extension
3918 // (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
3919 // src is treated as a 32-bit signed integer, which is sign extended to
3920 // 64-bit if necessary.
3921 void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
3922 Register src2) {
3923 #if V8_TARGET_ARCH_S390X
3924 if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
3925
3926 // In the dst = src - dst case, sign-extend and negate dst, then add src.
3927 if (!dst.is(src1) && dst.is(src2)) {
3928 lgfr(dst, dst); // Sign extend this operand first.
3929 LoadComplementRR(dst, dst); // dst = -dst
3930 AddP(dst, src1); // dst = -dst + src
3931 } else {
3932 sgfr(dst, src2);
3933 }
3934 #else
3935 SubP(dst, src1, src2);
3936 #endif
3937 }
3938
3939 // Subtract 32-bit (Register-Memory)
3940 void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
3941 DCHECK(is_int20(opnd.offset()));
3942 if (is_uint12(opnd.offset()))
3943 s(dst, opnd);
3944 else
3945 sy(dst, opnd);
3946 }
3947
3948 // Subtract Pointer Sized (Register - Memory)
3949 void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
3950 #if V8_TARGET_ARCH_S390X
3951 sg(dst, opnd);
3952 #else
3953 Sub32(dst, opnd);
3954 #endif
3955 }
3956
3957 void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
3958 sllg(src, src, Operand(32));
3959 ldgr(dst, src);
3960 }
3961
3962 void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
3963 lgdr(dst, src);
3964 srlg(dst, dst, Operand(32));
3965 }
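// Illustrative round trip: MovIntToFloat followed by MovFloatToInt carries a
// raw 32-bit pattern through the high half of an FPR, e.g. 0x3F800000 (the
// bit pattern of 1.0f) comes back unchanged. Note that MovIntToFloat shifts
// its source register in place, so src is clobbered.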
3966
3967 void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
3968 #if V8_TARGET_ARCH_S390X
3969 DCHECK(is_int20(opnd.offset()));
3970 sgf(dst, opnd);
3971 #else
3972 Sub32(dst, opnd);
3973 #endif
3974 }
3975
3976 //----------------------------------------------------------------------------
3977 // Subtract Logical Instructions
3978 //----------------------------------------------------------------------------
3979
3980 // Subtract Logical 32-bit (Register - Memory)
3981 void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
3982 DCHECK(is_int20(opnd.offset()));
3983 if (is_uint12(opnd.offset()))
3984 sl(dst, opnd);
3985 else
3986 sly(dst, opnd);
3987 }
3988
3989 // Subtract Logical Pointer Sized (Register - Memory)
3990 void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
3991 DCHECK(is_int20(opnd.offset()));
3992 #if V8_TARGET_ARCH_S390X
3993 slgf(dst, opnd);
3994 #else
3995 SubLogical(dst, opnd);
3996 #endif
3997 }
3998
3999 // Subtract Logical Pointer Size with src extension
4000 // (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
4001 // src is treated as a 32-bit signed integer, which is sign extended to
4002 // 64-bit if necessary.
4003 void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
4004 const MemOperand& opnd) {
4005 #if V8_TARGET_ARCH_S390X
4006 DCHECK(is_int20(opnd.offset()));
4007 slgf(dst, opnd);
4008 #else
4009 SubLogical(dst, opnd);
4010 #endif
4011 }
4012
4013 //----------------------------------------------------------------------------
4014 // Bitwise Operations
4015 //----------------------------------------------------------------------------
4016
4017 // AND 32-bit - dst = dst & src
4018 void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
4019
4020 // AND Pointer Size - dst = dst & src
4021 void MacroAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
4022
4023 // Non-clobbering AND 32-bit - dst = src1 & src2
4024 void MacroAssembler::And(Register dst, Register src1, Register src2) {
4025 if (!dst.is(src1) && !dst.is(src2)) {
4026 // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
4027 // as NR is a smaller instruction.
4028 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4029 nrk(dst, src1, src2);
4030 return;
4031 } else {
4032 lr(dst, src1);
4033 }
4034 } else if (dst.is(src2)) {
4035 src2 = src1;
4036 }
4037 And(dst, src2);
4038 }
4039
4040 // Non-clobbering AND pointer size - dst = src1 & src2
4041 void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
4042 if (!dst.is(src1) && !dst.is(src2)) {
4043 // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
4044 // as NR is a smaller instruction.
4045 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4046 AndP_RRR(dst, src1, src2);
4047 return;
4048 } else {
4049 LoadRR(dst, src1);
4050 }
4051 } else if (dst.is(src2)) {
4052 src2 = src1;
4053 }
4054 AndP(dst, src2);
4055 }
4056
4057 // AND 32-bit (Reg - Mem)
4058 void MacroAssembler::And(Register dst, const MemOperand& opnd) {
4059 DCHECK(is_int20(opnd.offset()));
4060 if (is_uint12(opnd.offset()))
4061 n(dst, opnd);
4062 else
4063 ny(dst, opnd);
4064 }
4065
4066 // AND Pointer Size (Reg - Mem)
4067 void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
4068 DCHECK(is_int20(opnd.offset()));
4069 #if V8_TARGET_ARCH_S390X
4070 ng(dst, opnd);
4071 #else
4072 And(dst, opnd);
4073 #endif
4074 }
4075
4076 // AND 32-bit - dst = dst & imm
4077 void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
4078
4079 // AND Pointer Size - dst = dst & imm
4080 void MacroAssembler::AndP(Register dst, const Operand& opnd) {
4081 #if V8_TARGET_ARCH_S390X
4082 intptr_t value = opnd.imm_;
4083 if (value >> 32 != -1) {
4084 // Note: splitting into nihf/nilf means the condition code reflects only the final nilf.
4085 nihf(dst, Operand(value >> 32));
4086 }
4087 nilf(dst, Operand(value & 0xFFFFFFFF));
4088 #else
4089 And(dst, opnd);
4090 #endif
4091 }
4092
4093 // AND 32-bit - dst = src & imm
4094 void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
4095 if (!dst.is(src)) lr(dst, src);
4096 nilf(dst, opnd);
4097 }
4098
4099 // AND Pointer Size - dst = src & imm
4100 void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
4101 // Try to exploit RISBG first
4102 intptr_t value = opnd.imm_;
4103 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
4104 intptr_t shifted_value = value;
4105 int trailing_zeros = 0;
4106
4107 // We start checking how many trailing zeros are left at the end.
4108 while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
4109 trailing_zeros++;
4110 shifted_value >>= 1;
4111 }
4112
4113 // If shifted_value (the value with its trailing zeros shifted out) is one
4114 // less than a power of 2, the mask is a single contiguous run of ones.
4115 // Special case: if shifted_value is zero we cannot use RISBG, as it
4116 // requires selecting at least one bit.
4117 if ((0 != shifted_value) && base::bits::IsPowerOfTwo64(shifted_value + 1)) {
4118 int startBit =
4119 base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
4120 int endBit = 63 - trailing_zeros;
4121 // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
4122 risbg(dst, src, Operand(startBit), Operand(endBit), Operand::Zero(),
4123 true);
4124 return;
4125 } else if (-1 == shifted_value) {
4126 // A Special case in which all top bits up to MSB are 1's. In this case,
4127 // we can set startBit to be 0.
4128 int endBit = 63 - trailing_zeros;
4129 risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
4130 return;
4131 }
4132 }
4133
4134 // If we are &'ing zero, we can just whack the dst register and skip copy
4135 if (!dst.is(src) && (0 != value)) LoadRR(dst, src);
4136 AndP(dst, opnd);
4137 }
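// Worked example (illustrative): AndP(r2, r3, Operand(0x0FF0)) finds
// trailing_zeros == 4 and shifted_value == 0xFF, a contiguous run of ones,
// so it emits a single risbg selecting bit range 52..59 (start = 56 leading
// zeros of 0xFF minus 4, end = 63 - 4) with the remaining bits zeroed,
// instead of a register copy plus nilf.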
4138
4139 // OR 32-bit - dst = dst | src
4140 void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
4141
4142 // OR Pointer Size - dst = dst | src
4143 void MacroAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
4144
4145 // Non-clobbering OR 32-bit - dst = src1 | src2
4146 void MacroAssembler::Or(Register dst, Register src1, Register src2) {
4147 if (!dst.is(src1) && !dst.is(src2)) {
4148 // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
4149 // as OR is a smaller instruction.
4150 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4151 ork(dst, src1, src2);
4152 return;
4153 } else {
4154 lr(dst, src1);
4155 }
4156 } else if (dst.is(src2)) {
4157 src2 = src1;
4158 }
4159 Or(dst, src2);
4160 }
4161
4162 // Non-clobbering OR pointer size - dst = src1 | src2
4163 void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
4164 if (!dst.is(src1) && !dst.is(src2)) {
4165 // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
4166 // as OR is a smaller instruction.
4167 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4168 OrP_RRR(dst, src1, src2);
4169 return;
4170 } else {
4171 LoadRR(dst, src1);
4172 }
4173 } else if (dst.is(src2)) {
4174 src2 = src1;
4175 }
4176 OrP(dst, src2);
4177 }
4178
4179 // OR 32-bit (Reg - Mem)
4180 void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
4181 DCHECK(is_int20(opnd.offset()));
4182 if (is_uint12(opnd.offset()))
4183 o(dst, opnd);
4184 else
4185 oy(dst, opnd);
4186 }
4187
4188 // OR Pointer Size (Reg - Mem)
4189 void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
4190 DCHECK(is_int20(opnd.offset()));
4191 #if V8_TARGET_ARCH_S390X
4192 og(dst, opnd);
4193 #else
4194 Or(dst, opnd);
4195 #endif
4196 }
4197
4198 // OR 32-bit - dst = dst | imm
4199 void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
4200
4201 // OR Pointer Size - dst = dst | imm
4202 void MacroAssembler::OrP(Register dst, const Operand& opnd) {
4203 #if V8_TARGET_ARCH_S390X
4204 intptr_t value = opnd.imm_;
4205 if (value >> 32 != 0) {
4206 // Note: splitting into oihf/oilf means the condition code reflects only the final oilf.
4207 oihf(dst, Operand(value >> 32));
4208 }
4209 oilf(dst, Operand(value & 0xFFFFFFFF));
4210 #else
4211 Or(dst, opnd);
4212 #endif
4213 }
4214
4215 // OR 32-bit - dst = src | imm
4216 void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
4217 if (!dst.is(src)) lr(dst, src);
4218 oilf(dst, opnd);
4219 }
4220
4221 // OR Pointer Size - dst = src | imm
4222 void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
4223 if (!dst.is(src)) LoadRR(dst, src);
4224 OrP(dst, opnd);
4225 }
4226
4227 // XOR 32-bit - dst = dst ^ src
4228 void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
4229
4230 // XOR Pointer Size - dst = dst ^ src
4231 void MacroAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
4232
4233 // Non-clobbering XOR 32-bit - dst = src1 ^ src2
4234 void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
4235 if (!dst.is(src1) && !dst.is(src2)) {
4236 // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
4237 // as XR is a smaller instruction.
4238 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4239 xrk(dst, src1, src2);
4240 return;
4241 } else {
4242 lr(dst, src1);
4243 }
4244 } else if (dst.is(src2)) {
4245 src2 = src1;
4246 }
4247 Xor(dst, src2);
4248 }
4249
4250 // Non-clobbering XOR pointer size - dst = src1 ^ src2
4251 void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
4252 if (!dst.is(src1) && !dst.is(src2)) {
4253 // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
4254 // as XR is a smaller instruction.
4255 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4256 XorP_RRR(dst, src1, src2);
4257 return;
4258 } else {
4259 LoadRR(dst, src1);
4260 }
4261 } else if (dst.is(src2)) {
4262 src2 = src1;
4263 }
4264 XorP(dst, src2);
4265 }
4266
4267 // XOR 32-bit (Reg - Mem)
4268 void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
4269 DCHECK(is_int20(opnd.offset()));
4270 if (is_uint12(opnd.offset()))
4271 x(dst, opnd);
4272 else
4273 xy(dst, opnd);
4274 }
4275
4276 // XOR Pointer Size (Reg - Mem)
4277 void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
4278 DCHECK(is_int20(opnd.offset()));
4279 #if V8_TARGET_ARCH_S390X
4280 xg(dst, opnd);
4281 #else
4282 Xor(dst, opnd);
4283 #endif
4284 }
4285
4286 // XOR 32-bit - dst = dst ^ imm
4287 void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
4288
4289 // XOR Pointer Size - dst = dst ^ imm
4290 void MacroAssembler::XorP(Register dst, const Operand& opnd) {
4291 #if V8_TARGET_ARCH_S390X
4292 intptr_t value = opnd.imm_;
4293 xihf(dst, Operand(value >> 32));
4294 xilf(dst, Operand(value & 0xFFFFFFFF));
4295 #else
4296 Xor(dst, opnd);
4297 #endif
4298 }
4299
4300 // XOR 32-bit - dst = src ^ imm
4301 void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
4302 if (!dst.is(src)) lr(dst, src);
4303 xilf(dst, opnd);
4304 }
4305
4306 // XOR Pointer Size - dst = src ^ imm
4307 void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
4308 if (!dst.is(src)) LoadRR(dst, src);
4309 XorP(dst, opnd);
4310 }
4311
4312 void MacroAssembler::Not32(Register dst, Register src) {
4313 if (!src.is(no_reg) && !src.is(dst)) lr(dst, src);
4314 xilf(dst, Operand(0xFFFFFFFF));
4315 }
4316
4317 void MacroAssembler::Not64(Register dst, Register src) {
4318 if (!src.is(no_reg) && !src.is(dst)) lgr(dst, src);
4319 xihf(dst, Operand(0xFFFFFFFF));
4320 xilf(dst, Operand(0xFFFFFFFF));
4321 }
4322
4323 void MacroAssembler::NotP(Register dst, Register src) {
4324 #if V8_TARGET_ARCH_S390X
4325 Not64(dst, src);
4326 #else
4327 Not32(dst, src);
4328 #endif
4329 }
4330
4331 // Load an immediate constant; like mov(), but free to pick a shorter encoding.
4332 void MacroAssembler::Load(Register dst, const Operand& opnd) {
4333 intptr_t value = opnd.immediate();
4334 if (is_int16(value)) {
4335 #if V8_TARGET_ARCH_S390X
4336 lghi(dst, opnd);
4337 #else
4338 lhi(dst, opnd);
4339 #endif
4340 } else {
4341 #if V8_TARGET_ARCH_S390X
4342 llilf(dst, opnd);
4343 #else
4344 iilf(dst, opnd);
4345 #endif
4346 }
4347 }
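// Illustrative selection: Load(r2, Operand(-1)) fits in 16 bits and emits
// lghi/lhi, while Load(r2, Operand(0x12345678)) takes the llilf/iilf path.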
4348
4349 void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
4350 DCHECK(is_int20(opnd.offset()));
4351 #if V8_TARGET_ARCH_S390X
4352 lgf(dst, opnd); // 64<-32
4353 #else
4354 if (is_uint12(opnd.offset())) {
4355 l(dst, opnd);
4356 } else {
4357 ly(dst, opnd);
4358 }
4359 #endif
4360 }
4361
4362 //-----------------------------------------------------------------------------
4363 // Compare Helpers
4364 //-----------------------------------------------------------------------------
4365
4366 // Compare 32-bit Register vs Register
4367 void MacroAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
4368
4369 // Compare Pointer Sized Register vs Register
4370 void MacroAssembler::CmpP(Register src1, Register src2) {
4371 #if V8_TARGET_ARCH_S390X
4372 cgr(src1, src2);
4373 #else
4374 Cmp32(src1, src2);
4375 #endif
4376 }
4377
4378 // Compare 32-bit Register vs Immediate
4379 // This helper will set up proper relocation entries if required.
4380 void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
4381 if (opnd.rmode_ == kRelocInfo_NONEPTR) {
4382 intptr_t value = opnd.immediate();
4383 if (is_int16(value))
4384 chi(dst, opnd);
4385 else
4386 cfi(dst, opnd);
4387 } else {
4388 // Need to generate relocation record here
4389 RecordRelocInfo(opnd.rmode_, opnd.imm_);
4390 cfi(dst, opnd);
4391 }
4392 }
4393
4394 // Compare Pointer Sized Register vs Immediate
4395 // This helper will set up proper relocation entries if required.
4396 void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
4397 #if V8_TARGET_ARCH_S390X
4398 if (opnd.rmode_ == kRelocInfo_NONEPTR) {
4399 cgfi(dst, opnd);
4400 } else {
4401 mov(r0, opnd); // Need to generate 64-bit relocation
4402 cgr(dst, r0);
4403 }
4404 #else
4405 Cmp32(dst, opnd);
4406 #endif
4407 }
4408
4409 // Compare 32-bit Register vs Memory
4410 void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
4411 // make sure offset is within 20 bit range
4412 DCHECK(is_int20(opnd.offset()));
4413 if (is_uint12(opnd.offset()))
4414 c(dst, opnd);
4415 else
4416 cy(dst, opnd);
4417 }
4418
4419 // Compare Pointer Size Register vs Memory
4420 void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
4421 // make sure offset is within 20 bit range
4422 DCHECK(is_int20(opnd.offset()));
4423 #if V8_TARGET_ARCH_S390X
4424 cg(dst, opnd);
4425 #else
4426 Cmp32(dst, opnd);
4427 #endif
4428 }
4429
4430 //-----------------------------------------------------------------------------
4431 // Compare Logical Helpers
4432 //-----------------------------------------------------------------------------
4433
4434 // Compare Logical 32-bit Register vs Register
4435 void MacroAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
4436
4437 // Compare Logical Pointer Sized Register vs Register
4438 void MacroAssembler::CmpLogicalP(Register dst, Register src) {
4439 #ifdef V8_TARGET_ARCH_S390X
4440 clgr(dst, src);
4441 #else
4442 CmpLogical32(dst, src);
4443 #endif
4444 }
4445
4446 // Compare Logical 32-bit Register vs Immediate
4447 void MacroAssembler::CmpLogical32(Register dst, const Operand& opnd) {
4448 clfi(dst, opnd);
4449 }
4450
4451 // Compare Logical Pointer Sized Register vs Immediate
4452 void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
4453 #if V8_TARGET_ARCH_S390X
4454 DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
4455 clgfi(dst, opnd);
4456 #else
4457 CmpLogical32(dst, opnd);
4458 #endif
4459 }
4460
4461 // Compare Logical 32-bit Register vs Memory
4462 void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
4463 // make sure offset is within 20 bit range
4464 DCHECK(is_int20(opnd.offset()));
4465 if (is_uint12(opnd.offset()))
4466 cl(dst, opnd);
4467 else
4468 cly(dst, opnd);
4469 }
4470
4471 // Compare Logical Pointer Sized Register vs Memory
4472 void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
4473 // make sure offset is within 20 bit range
4474 DCHECK(is_int20(opnd.offset()));
4475 #if V8_TARGET_ARCH_S390X
4476 clg(dst, opnd);
4477 #else
4478 CmpLogical32(dst, opnd);
4479 #endif
4480 }
4481
4482 // Compare Logical Byte (Mem - Imm)
CmpLogicalByte(const MemOperand & mem,const Operand & imm)4483 void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
4484 DCHECK(is_uint8(imm.immediate()));
4485 if (is_uint12(mem.offset()))
4486 cli(mem, imm);
4487 else
4488 cliy(mem, imm);
4489 }
4490
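// Branch relative on condition. BRC encodes a 16-bit offset operand; BRCL is
// used when the offset requires a full 32 bits.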
Branch(Condition c,const Operand & opnd)4491 void MacroAssembler::Branch(Condition c, const Operand& opnd) {
4492 intptr_t value = opnd.immediate();
4493 if (is_int16(value))
4494 brc(c, opnd);
4495 else
4496 brcl(c, opnd);
4497 }
4498
4499 // Branch On Count. Decrement R1, and branch if R1 != 0.
BranchOnCount(Register r1,Label * l)4500 void MacroAssembler::BranchOnCount(Register r1, Label* l) {
4501 int32_t offset = branch_offset(l);
4502 if (is_int16(offset)) {
4503 #if V8_TARGET_ARCH_S390X
4504 brctg(r1, Operand(offset));
4505 #else
4506 brct(r1, Operand(offset));
4507 #endif
4508 } else {
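    // Offset is too large for BRCT/BRCTG; decrement and branch explicitly.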
4509 AddP(r1, Operand(-1));
4510 Branch(ne, Operand(offset));
4511 }
4512 }
4513
LoadIntLiteral(Register dst,int value)4514 void MacroAssembler::LoadIntLiteral(Register dst, int value) {
4515 Load(dst, Operand(value));
4516 }
4517
LoadSmiLiteral(Register dst,Smi * smi)4518 void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
4519 intptr_t value = reinterpret_cast<intptr_t>(smi);
4520 #if V8_TARGET_ARCH_S390X
4521 DCHECK((value & 0xffffffff) == 0);
4522 // The smi value is loaded into the upper 32 bits; the lower 32 bits are zero.
4523 llihf(dst, Operand(value >> 32));
4524 #else
4525 llilf(dst, Operand(value));
4526 #endif
4527 }
4528
LoadDoubleLiteral(DoubleRegister result,uint64_t value,Register scratch)4529 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
4530 Register scratch) {
4531 uint32_t hi_32 = value >> 32;
4532 uint32_t lo_32 = static_cast<uint32_t>(value);
4533
4534 // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
4535 iihf(scratch, Operand(hi_32));
4536 iilf(scratch, Operand(lo_32));
4537 ldgr(result, scratch);
4538 }
4539
LoadDoubleLiteral(DoubleRegister result,double value,Register scratch)4540 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
4541 Register scratch) {
4542 uint64_t int_val = bit_cast<uint64_t, double>(value);
4543 LoadDoubleLiteral(result, int_val, scratch);
4544 }
4545
LoadFloat32Literal(DoubleRegister result,float value,Register scratch)4546 void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
4547 Register scratch) {
4548 uint32_t hi_32 = bit_cast<uint32_t>(value);
4549 uint32_t lo_32 = 0;
4550
4551 // Load the float bits into the high word of a GPR (low word is zero), then transfer to the FPR via LDGR
4552 iihf(scratch, Operand(hi_32));
4553 iilf(scratch, Operand(lo_32));
4554 ldgr(result, scratch);
4555 }
4556
CmpSmiLiteral(Register src1,Smi * smi,Register scratch)4557 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
4558 #if V8_TARGET_ARCH_S390X
4559 LoadSmiLiteral(scratch, smi);
4560 cgr(src1, scratch);
4561 #else
4562 // CFI takes 32-bit immediate.
4563 cfi(src1, Operand(smi));
4564 #endif
4565 }
4566
CmpLogicalSmiLiteral(Register src1,Smi * smi,Register scratch)4567 void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
4568 Register scratch) {
4569 #if V8_TARGET_ARCH_S390X
4570 LoadSmiLiteral(scratch, smi);
4571 clgr(src1, scratch);
4572 #else
4573 // CLFI takes 32-bit immediate
4574 clfi(src1, Operand(smi));
4575 #endif
4576 }
4577
AddSmiLiteral(Register dst,Register src,Smi * smi,Register scratch)4578 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
4579 Register scratch) {
4580 #if V8_TARGET_ARCH_S390X
4581 LoadSmiLiteral(scratch, smi);
4582 AddP(dst, src, scratch);
4583 #else
4584 AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
4585 #endif
4586 }
4587
SubSmiLiteral(Register dst,Register src,Smi * smi,Register scratch)4588 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
4589 Register scratch) {
4590 #if V8_TARGET_ARCH_S390X
4591 LoadSmiLiteral(scratch, smi);
4592 SubP(dst, src, scratch);
4593 #else
4594 AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
4595 #endif
4596 }
4597
AndSmiLiteral(Register dst,Register src,Smi * smi)4598 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
4599 if (!dst.is(src)) LoadRR(dst, src);
4600 #if V8_TARGET_ARCH_S390X
4601 DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
4602 int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
4603 nihf(dst, Operand(value));
4604 #else
4605 nilf(dst, Operand(reinterpret_cast<int>(smi)));
4606 #endif
4607 }
4608
4609 // Load a "pointer" sized value from the memory location
LoadP(Register dst,const MemOperand & mem,Register scratch)4610 void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
4611 Register scratch) {
4612 int offset = mem.offset();
4613
4614 if (!scratch.is(no_reg) && !is_int20(offset)) {
4615 /* cannot use d-form */
4616 LoadIntLiteral(scratch, offset);
4617 #if V8_TARGET_ARCH_S390X
4618 lg(dst, MemOperand(mem.rb(), scratch));
4619 #else
4620 l(dst, MemOperand(mem.rb(), scratch));
4621 #endif
4622 } else {
4623 #if V8_TARGET_ARCH_S390X
4624 lg(dst, mem);
4625 #else
4626 if (is_uint12(offset)) {
4627 l(dst, mem);
4628 } else {
4629 ly(dst, mem);
4630 }
4631 #endif
4632 }
4633 }
4634
4635 // Store a "pointer" sized value to the memory location
StoreP(Register src,const MemOperand & mem,Register scratch)4636 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
4637 Register scratch) {
4638 if (!is_int20(mem.offset())) {
4639 DCHECK(!scratch.is(no_reg));
4640 DCHECK(!scratch.is(r0));
4641 LoadIntLiteral(scratch, mem.offset());
4642 #if V8_TARGET_ARCH_S390X
4643 stg(src, MemOperand(mem.rb(), scratch));
4644 #else
4645 st(src, MemOperand(mem.rb(), scratch));
4646 #endif
4647 } else {
4648 #if V8_TARGET_ARCH_S390X
4649 stg(src, mem);
4650 #else
4651 // StoreW will try to generate ST if offset fits, otherwise
4652 // it'll generate STY.
4653 StoreW(src, mem);
4654 #endif
4655 }
4656 }
4657
4658 // Store a "pointer" sized constant to the memory location
StoreP(const MemOperand & mem,const Operand & opnd,Register scratch)4659 void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
4660 Register scratch) {
4661 // Relocations not supported
4662 DCHECK(opnd.rmode_ == kRelocInfo_NONEPTR);
4663
4664 // Try to use MVGHI/MVHI
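  // (store a 16-bit signed immediate directly to memory; requires the
  // general-instructions-extension facility, a 12-bit offset and no index register)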
4665 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
4666 mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) {
4667 #if V8_TARGET_ARCH_S390X
4668 mvghi(mem, opnd);
4669 #else
4670 mvhi(mem, opnd);
4671 #endif
4672 } else {
4673 LoadImmP(scratch, opnd);
4674 StoreP(scratch, mem);
4675 }
4676 }
4677
LoadMultipleP(Register dst1,Register dst2,const MemOperand & mem)4678 void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
4679 const MemOperand& mem) {
4680 #if V8_TARGET_ARCH_S390X
4681 DCHECK(is_int20(mem.offset()));
4682 lmg(dst1, dst2, mem);
4683 #else
4684 if (is_uint12(mem.offset())) {
4685 lm(dst1, dst2, mem);
4686 } else {
4687 DCHECK(is_int20(mem.offset()));
4688 lmy(dst1, dst2, mem);
4689 }
4690 #endif
4691 }
4692
StoreMultipleP(Register src1,Register src2,const MemOperand & mem)4693 void MacroAssembler::StoreMultipleP(Register src1, Register src2,
4694 const MemOperand& mem) {
4695 #if V8_TARGET_ARCH_S390X
4696 DCHECK(is_int20(mem.offset()));
4697 stmg(src1, src2, mem);
4698 #else
4699 if (is_uint12(mem.offset())) {
4700 stm(src1, src2, mem);
4701 } else {
4702 DCHECK(is_int20(mem.offset()));
4703 stmy(src1, src2, mem);
4704 }
4705 #endif
4706 }
4707
LoadMultipleW(Register dst1,Register dst2,const MemOperand & mem)4708 void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
4709 const MemOperand& mem) {
4710 if (is_uint12(mem.offset())) {
4711 lm(dst1, dst2, mem);
4712 } else {
4713 DCHECK(is_int20(mem.offset()));
4714 lmy(dst1, dst2, mem);
4715 }
4716 }
4717
StoreMultipleW(Register src1,Register src2,const MemOperand & mem)4718 void MacroAssembler::StoreMultipleW(Register src1, Register src2,
4719 const MemOperand& mem) {
4720 if (is_uint12(mem.offset())) {
4721 stm(src1, src2, mem);
4722 } else {
4723 DCHECK(is_int20(mem.offset()));
4724 stmy(src1, src2, mem);
4725 }
4726 }
4727
4728 // Load 32-bits and sign extend if necessary.
LoadW(Register dst,Register src)4729 void MacroAssembler::LoadW(Register dst, Register src) {
4730 #if V8_TARGET_ARCH_S390X
4731 lgfr(dst, src);
4732 #else
4733 if (!dst.is(src)) lr(dst, src);
4734 #endif
4735 }
4736
4737 // Load 32-bits and sign extend if necessary.
LoadW(Register dst,const MemOperand & mem,Register scratch)4738 void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
4739 Register scratch) {
4740 int offset = mem.offset();
4741
4742 if (!is_int20(offset)) {
4743 DCHECK(!scratch.is(no_reg));
4744 LoadIntLiteral(scratch, offset);
4745 #if V8_TARGET_ARCH_S390X
4746 lgf(dst, MemOperand(mem.rb(), scratch));
4747 #else
4748 l(dst, MemOperand(mem.rb(), scratch));
4749 #endif
4750 } else {
4751 #if V8_TARGET_ARCH_S390X
4752 lgf(dst, mem);
4753 #else
4754 if (is_uint12(offset)) {
4755 l(dst, mem);
4756 } else {
4757 ly(dst, mem);
4758 }
4759 #endif
4760 }
4761 }
4762
4763 // Load 32-bits and zero extend if necessary.
LoadlW(Register dst,Register src)4764 void MacroAssembler::LoadlW(Register dst, Register src) {
4765 #if V8_TARGET_ARCH_S390X
4766 llgfr(dst, src);
4767 #else
4768 if (!dst.is(src)) lr(dst, src);
4769 #endif
4770 }
4771
4772 // Variable-length encoding depending on whether the offset fits into the
4773 // immediate field of the RX or RXY format MemOperand.
LoadlW(Register dst,const MemOperand & mem,Register scratch)4774 void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
4775 Register scratch) {
4776 Register base = mem.rb();
4777 int offset = mem.offset();
4778
4779 #if V8_TARGET_ARCH_S390X
4780 if (is_int20(offset)) {
4781 llgf(dst, mem);
4782 } else if (!scratch.is(no_reg)) {
4783 // Materialize offset into scratch register.
4784 LoadIntLiteral(scratch, offset);
4785 llgf(dst, MemOperand(base, scratch));
4786 } else {
4787 DCHECK(false);
4788 }
4789 #else
4790 bool use_RXform = false;
4791 bool use_RXYform = false;
4792 if (is_uint12(offset)) {
4793 // RX-format supports unsigned 12-bits offset.
4794 use_RXform = true;
4795 } else if (is_int20(offset)) {
4796 // RXY-format supports signed 20-bits offset.
4797 use_RXYform = true;
4798 } else if (!scratch.is(no_reg)) {
4799 // Materialize offset into scratch register.
4800 LoadIntLiteral(scratch, offset);
4801 } else {
4802 DCHECK(false);
4803 }
4804
4805 if (use_RXform) {
4806 l(dst, mem);
4807 } else if (use_RXYform) {
4808 ly(dst, mem);
4809 } else {
4810 ly(dst, MemOperand(base, scratch));
4811 }
4812 #endif
4813 }
4814
LoadLogicalHalfWordP(Register dst,const MemOperand & mem)4815 void MacroAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
4816 #if V8_TARGET_ARCH_S390X
4817 llgh(dst, mem);
4818 #else
4819 llh(dst, mem);
4820 #endif
4821 }
4822
LoadLogicalHalfWordP(Register dst,Register src)4823 void MacroAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
4824 #if V8_TARGET_ARCH_S390X
4825 llghr(dst, src);
4826 #else
4827 llhr(dst, src);
4828 #endif
4829 }
4830
LoadB(Register dst,const MemOperand & mem)4831 void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
4832 #if V8_TARGET_ARCH_S390X
4833 lgb(dst, mem);
4834 #else
4835 lb(dst, mem);
4836 #endif
4837 }
4838
LoadB(Register dst,Register src)4839 void MacroAssembler::LoadB(Register dst, Register src) {
4840 #if V8_TARGET_ARCH_S390X
4841 lgbr(dst, src);
4842 #else
4843 lbr(dst, src);
4844 #endif
4845 }
4846
LoadlB(Register dst,const MemOperand & mem)4847 void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
4848 #if V8_TARGET_ARCH_S390X
4849 llgc(dst, mem);
4850 #else
4851 llc(dst, mem);
4852 #endif
4853 }
4854
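// Load a 32-bit value with its bytes reversed (byte-swapped load) and
// zero extend it to pointer size.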
LoadLogicalReversedWordP(Register dst,const MemOperand & mem)4855 void MacroAssembler::LoadLogicalReversedWordP(Register dst,
4856 const MemOperand& mem) {
4857 lrv(dst, mem);
4858 LoadlW(dst, dst);
4859 }
4860
4861
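// Load a 16-bit value with its bytes reversed and zero extend it to pointer size.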
LoadLogicalReversedHalfWordP(Register dst,const MemOperand & mem)4862 void MacroAssembler::LoadLogicalReversedHalfWordP(Register dst,
4863 const MemOperand& mem) {
4864 lrvh(dst, mem);
4865 LoadLogicalHalfWordP(dst, dst);
4866 }
4867
4868
4869 // Load And Test 32-bit (Reg <- Reg)
LoadAndTest32(Register dst,Register src)4870 void MacroAssembler::LoadAndTest32(Register dst, Register src) {
4871 ltr(dst, src);
4872 }
4873
4874 // Load And Test
4875 // (Register dst(ptr) = Register src (32 | 32->64))
4876 // src is treated as a 32-bit signed integer, which is sign extended to
4877 // 64-bit if necessary.
LoadAndTestP_ExtendSrc(Register dst,Register src)4878 void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
4879 #if V8_TARGET_ARCH_S390X
4880 ltgfr(dst, src);
4881 #else
4882 ltr(dst, src);
4883 #endif
4884 }
4885
4886 // Load And Test Pointer Sized (Reg <- Reg)
LoadAndTestP(Register dst,Register src)4887 void MacroAssembler::LoadAndTestP(Register dst, Register src) {
4888 #if V8_TARGET_ARCH_S390X
4889 ltgr(dst, src);
4890 #else
4891 ltr(dst, src);
4892 #endif
4893 }
4894
4895 // Load And Test 32-bit (Reg <- Mem)
LoadAndTest32(Register dst,const MemOperand & mem)4896 void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
4897 lt_z(dst, mem);
4898 }
4899
4900 // Load And Test Pointer Sized (Reg <- Mem)
LoadAndTestP(Register dst,const MemOperand & mem)4901 void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
4902 #if V8_TARGET_ARCH_S390X
4903 ltg(dst, mem);
4904 #else
4905 lt_z(dst, mem);
4906 #endif
4907 }
4908
4909 // Load On Condition Pointer Sized (Reg <- Reg)
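// Note: LOCR/LOCGR require the load/store-on-condition facility (z196 and later).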
LoadOnConditionP(Condition cond,Register dst,Register src)4910 void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
4911 Register src) {
4912 #if V8_TARGET_ARCH_S390X
4913 locgr(cond, dst, src);
4914 #else
4915 locr(cond, dst, src);
4916 #endif
4917 }
4918
4919 // Load Double Precision (64-bit) Floating Point number from memory
LoadDouble(DoubleRegister dst,const MemOperand & mem)4920 void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
4921 // Both 32-bit and 64-bit targets use 64-bit floating point registers.
4922 if (is_uint12(mem.offset())) {
4923 ld(dst, mem);
4924 } else {
4925 ldy(dst, mem);
4926 }
4927 }
4928
4929 // Load Single Precision (32-bit) Floating Point number from memory
LoadFloat32(DoubleRegister dst,const MemOperand & mem)4930 void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
4931 if (is_uint12(mem.offset())) {
4932 le_z(dst, mem);
4933 } else {
4934 DCHECK(is_int20(mem.offset()));
4935 ley(dst, mem);
4936 }
4937 }
4938
4939 // Load Single Precision (32-bit) Floating Point number from memory,
4940 // and convert to Double Precision (64-bit)
LoadFloat32ConvertToDouble(DoubleRegister dst,const MemOperand & mem)4941 void MacroAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
4942 const MemOperand& mem) {
4943 LoadFloat32(dst, mem);
4944 ldebr(dst, dst);
4945 }
4946
4947 // Store Double Precision (64-bit) Floating Point number to memory
StoreDouble(DoubleRegister dst,const MemOperand & mem)4948 void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
4949 if (is_uint12(mem.offset())) {
4950 std(dst, mem);
4951 } else {
4952 stdy(dst, mem);
4953 }
4954 }
4955
4956 // Store Single Precision (32-bit) Floating Point number to memory
StoreFloat32(DoubleRegister src,const MemOperand & mem)4957 void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
4958 if (is_uint12(mem.offset())) {
4959 ste(src, mem);
4960 } else {
4961 stey(src, mem);
4962 }
4963 }
4964
4965 // Convert Double precision (64-bit) to Single Precision (32-bit)
4966 // and store resulting Float32 to memory
StoreDoubleAsFloat32(DoubleRegister src,const MemOperand & mem,DoubleRegister scratch)4967 void MacroAssembler::StoreDoubleAsFloat32(DoubleRegister src,
4968 const MemOperand& mem,
4969 DoubleRegister scratch) {
4970 ledbr(scratch, src);
4971 StoreFloat32(scratch, mem);
4972 }
4973
4974 // Variable-length encoding depending on whether the offset fits into the
4975 // immediate field of the RX or RXY format MemOperand.
StoreW(Register src,const MemOperand & mem,Register scratch)4976 void MacroAssembler::StoreW(Register src, const MemOperand& mem,
4977 Register scratch) {
4978 Register base = mem.rb();
4979 int offset = mem.offset();
4980
4981 bool use_RXform = false;
4982 bool use_RXYform = false;
4983
4984 if (is_uint12(offset)) {
4985 // RX-format supports unsigned 12-bits offset.
4986 use_RXform = true;
4987 } else if (is_int20(offset)) {
4988 // RXY-format supports signed 20-bits offset.
4989 use_RXYform = true;
4990 } else if (!scratch.is(no_reg)) {
4991 // Materialize offset into scratch register.
4992 LoadIntLiteral(scratch, offset);
4993 } else {
4994 // scratch is no_reg
4995 DCHECK(false);
4996 }
4997
4998 if (use_RXform) {
4999 st(src, mem);
5000 } else if (use_RXYform) {
5001 sty(src, mem);
5002 } else {
5003 StoreW(src, MemOperand(base, scratch));
5004 }
5005 }
5006
5007 // Loads a 16-bit halfword value from memory and sign extends it to a
5008 // pointer-sized register.
LoadHalfWordP(Register dst,const MemOperand & mem,Register scratch)5009 void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
5010 Register scratch) {
5011 Register base = mem.rb();
5012 int offset = mem.offset();
5013
5014 if (!is_int20(offset)) {
5015 DCHECK(!scratch.is(no_reg));
5016 LoadIntLiteral(scratch, offset);
5017 #if V8_TARGET_ARCH_S390X
5018 lgh(dst, MemOperand(base, scratch));
5019 #else
5020 lh(dst, MemOperand(base, scratch));
5021 #endif
5022 } else {
5023 #if V8_TARGET_ARCH_S390X
5024 lgh(dst, mem);
5025 #else
5026 if (is_uint12(offset)) {
5027 lh(dst, mem);
5028 } else {
5029 lhy(dst, mem);
5030 }
5031 #endif
5032 }
5033 }
5034
5035 // Variable-length encoding depending on whether the offset fits into the
5036 // immediate field. MemOperand currently only supports d-form addressing.
StoreHalfWord(Register src,const MemOperand & mem,Register scratch)5037 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
5038 Register scratch) {
5039 Register base = mem.rb();
5040 int offset = mem.offset();
5041
5042 if (is_uint12(offset)) {
5043 sth(src, mem);
5044 } else if (is_int20(offset)) {
5045 sthy(src, mem);
5046 } else {
5047 DCHECK(!scratch.is(no_reg));
5048 LoadIntLiteral(scratch, offset);
5049 sth(src, MemOperand(base, scratch));
5050 }
5051 }
5052
5053 // Variable-length encoding depending on whether the offset fits into the
5054 // immediate field. MemOperand currently only supports d-form addressing.
StoreByte(Register src,const MemOperand & mem,Register scratch)5055 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
5056 Register scratch) {
5057 Register base = mem.rb();
5058 int offset = mem.offset();
5059
5060 if (is_uint12(offset)) {
5061 stc(src, mem);
5062 } else if (is_int20(offset)) {
5063 stcy(src, mem);
5064 } else {
5065 DCHECK(!scratch.is(no_reg));
5066 LoadIntLiteral(scratch, offset);
5067 stc(src, MemOperand(base, scratch));
5068 }
5069 }
5070
5071 // Shift left logical for 32-bit integer types.
ShiftLeft(Register dst,Register src,const Operand & val)5072 void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
5073 if (dst.is(src)) {
5074 sll(dst, val);
5075 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5076 sllk(dst, src, val);
5077 } else {
5078 lr(dst, src);
5079 sll(dst, val);
5080 }
5081 }
5082
5083 // Shift left logical for 32-bit integer types.
ShiftLeft(Register dst,Register src,Register val)5084 void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
5085 if (dst.is(src)) {
5086 sll(dst, val);
5087 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5088 sllk(dst, src, val);
5089 } else {
5090 DCHECK(!dst.is(val)); // The lr/sll path clobbers val.
5091 lr(dst, src);
5092 sll(dst, val);
5093 }
5094 }
5095
5096 // Shift right logical for 32-bit integer types.
ShiftRight(Register dst,Register src,const Operand & val)5097 void MacroAssembler::ShiftRight(Register dst, Register src,
5098 const Operand& val) {
5099 if (dst.is(src)) {
5100 srl(dst, val);
5101 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5102 srlk(dst, src, val);
5103 } else {
5104 lr(dst, src);
5105 srl(dst, val);
5106 }
5107 }
5108
5109 // Shift right logical for 32-bit integer types.
ShiftRight(Register dst,Register src,Register val)5110 void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
5111 if (dst.is(src)) {
5112 srl(dst, val);
5113 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5114 srlk(dst, src, val);
5115 } else {
5116 DCHECK(!dst.is(val)); // The lr/srl path clobbers val.
5117 lr(dst, src);
5118 srl(dst, val);
5119 }
5120 }
5121
5122 // Shift left arithmetic for 32-bit integer types.
ShiftLeftArith(Register dst,Register src,const Operand & val)5123 void MacroAssembler::ShiftLeftArith(Register dst, Register src,
5124 const Operand& val) {
5125 if (dst.is(src)) {
5126 sla(dst, val);
5127 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5128 slak(dst, src, val);
5129 } else {
5130 lr(dst, src);
5131 sla(dst, val);
5132 }
5133 }
5134
5135 // Shift left arithmetic for 32-bit integer types.
ShiftLeftArith(Register dst,Register src,Register val)5136 void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
5137 if (dst.is(src)) {
5138 sla(dst, val);
5139 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5140 slak(dst, src, val);
5141 } else {
5142 DCHECK(!dst.is(val)); // The lr/sla path clobbers val.
5143 lr(dst, src);
5144 sla(dst, val);
5145 }
5146 }
5147
5148 // Shift right arithmetic for 32-bit integer types.
ShiftRightArith(Register dst,Register src,const Operand & val)5149 void MacroAssembler::ShiftRightArith(Register dst, Register src,
5150 const Operand& val) {
5151 if (dst.is(src)) {
5152 sra(dst, val);
5153 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5154 srak(dst, src, val);
5155 } else {
5156 lr(dst, src);
5157 sra(dst, val);
5158 }
5159 }
5160
5161 // Shift right arithmetic for 32-bit integer types.
ShiftRightArith(Register dst,Register src,Register val)5162 void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
5163 if (dst.is(src)) {
5164 sra(dst, val);
5165 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
5166 srak(dst, src, val);
5167 } else {
5168 DCHECK(!dst.is(val)); // The lr/sra path clobbers val.
5169 lr(dst, src);
5170 sra(dst, val);
5171 }
5172 }
5173
5174 // Clear the rightmost # of bits.
ClearRightImm(Register dst,Register src,const Operand & val)5175 void MacroAssembler::ClearRightImm(Register dst, Register src,
5176 const Operand& val) {
5177 int numBitsToClear = val.imm_ % (kPointerSize * 8);
5178
5179 // Try to use RISBG if possible
5180 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
5181 int endBit = 63 - numBitsToClear;
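    // RISBG with a zero rotate inserts bits 0..endBit and zeroes the rest,
    // clearing the rightmost numBitsToClear bits in a single instruction.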
5182 risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
5183 return;
5184 }
5185
5186 uint64_t hexMask = ~((1L << numBitsToClear) - 1);
5187
5188 // S390 AND instr clobbers source. Make a copy if necessary
5189 if (!dst.is(src)) LoadRR(dst, src);
5190
5191 if (numBitsToClear <= 16) {
5192 nill(dst, Operand(static_cast<uint16_t>(hexMask)));
5193 } else if (numBitsToClear <= 32) {
5194 nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
5195 } else if (numBitsToClear <= 64) {
5196 nilf(dst, Operand(static_cast<intptr_t>(0)));
5197 nihf(dst, Operand(hexMask >> 32));
5198 }
5199 }
5200
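// POPCNT computes the bit count of each byte of the source independently;
// the per-byte counts are accumulated into the low byte by the shifts below.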
Popcnt32(Register dst,Register src)5201 void MacroAssembler::Popcnt32(Register dst, Register src) {
5202 DCHECK(!src.is(r0));
5203 DCHECK(!dst.is(r0));
5204
5205 popcnt(dst, src);
5206 ShiftRight(r0, dst, Operand(16));
5207 ar(dst, r0);
5208 ShiftRight(r0, dst, Operand(8));
5209 ar(dst, r0);
5210 LoadB(dst, dst);
5211 }
5212
5213 #ifdef V8_TARGET_ARCH_S390X
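// 64-bit variant: sum all eight per-byte counts into the low byte.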
Popcnt64(Register dst,Register src)5214 void MacroAssembler::Popcnt64(Register dst, Register src) {
5215 DCHECK(!src.is(r0));
5216 DCHECK(!dst.is(r0));
5217
5218 popcnt(dst, src);
5219 ShiftRightP(r0, dst, Operand(32));
5220 AddP(dst, r0);
5221 ShiftRightP(r0, dst, Operand(16));
5222 AddP(dst, r0);
5223 ShiftRightP(r0, dst, Operand(8));
5224 AddP(dst, r0);
5225 LoadB(dst, dst);
5226 }
5227 #endif
5228
5229 #ifdef DEBUG
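// Returns true if any two of the supplied (valid) registers alias each other.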
AreAliased(Register reg1,Register reg2,Register reg3,Register reg4,Register reg5,Register reg6,Register reg7,Register reg8,Register reg9,Register reg10)5230 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
5231 Register reg5, Register reg6, Register reg7, Register reg8,
5232 Register reg9, Register reg10) {
5233 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
5234 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5235 reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
5236 reg10.is_valid();
5237
5238 RegList regs = 0;
5239 if (reg1.is_valid()) regs |= reg1.bit();
5240 if (reg2.is_valid()) regs |= reg2.bit();
5241 if (reg3.is_valid()) regs |= reg3.bit();
5242 if (reg4.is_valid()) regs |= reg4.bit();
5243 if (reg5.is_valid()) regs |= reg5.bit();
5244 if (reg6.is_valid()) regs |= reg6.bit();
5245 if (reg7.is_valid()) regs |= reg7.bit();
5246 if (reg8.is_valid()) regs |= reg8.bit();
5247 if (reg9.is_valid()) regs |= reg9.bit();
5248 if (reg10.is_valid()) regs |= reg10.bit();
5249 int n_of_non_aliasing_regs = NumRegs(regs);
5250
5251 return n_of_valid_regs != n_of_non_aliasing_regs;
5252 }
5253 #endif
5254
CodePatcher(Isolate * isolate,byte * address,int size,FlushICache flush_cache)5255 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size,
5256 FlushICache flush_cache)
5257 : address_(address),
5258 size_(size),
5259 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
5260 flush_cache_(flush_cache) {
5261 // Create a new macro assembler pointing to the address of the code to patch.
5262 // The size is adjusted with kGap in order for the assembler to generate size
5263 // bytes of instructions without failing with buffer size constraints.
5264 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5265 }
5266
~CodePatcher()5267 CodePatcher::~CodePatcher() {
5268 // Indicate that code has changed.
5269 if (flush_cache_ == FLUSH) {
5270 Assembler::FlushICache(masm_.isolate(), address_, size_);
5271 }
5272
5273 // Check that the code was patched as expected.
5274 DCHECK(masm_.pc_ == address_ + size_);
5275 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5276 }
5277
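// Truncating signed division by a constant, implemented as multiplication by a
// precomputed magic multiplier: the quotient comes from the high 32 bits of the
// product, followed by the shift and sign corrections below.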
TruncatingDiv(Register result,Register dividend,int32_t divisor)5278 void MacroAssembler::TruncatingDiv(Register result, Register dividend,
5279 int32_t divisor) {
5280 DCHECK(!dividend.is(result));
5281 DCHECK(!dividend.is(r0));
5282 DCHECK(!result.is(r0));
5283 base::MagicNumbersForDivision<uint32_t> mag =
5284 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5285 #ifdef V8_TARGET_ARCH_S390X
5286 LoadRR(result, dividend);
5287 MulP(result, Operand(mag.multiplier));
5288 ShiftRightArithP(result, result, Operand(32));
5289
5290 #else
5291 lay(sp, MemOperand(sp, -kPointerSize));
5292 StoreP(r1, MemOperand(sp));
5293
5294 mov(r1, Operand(mag.multiplier));
5295 mr_z(r0, dividend); // r0:r1 = r1 * dividend
5296
5297 LoadRR(result, r0);
5298 LoadP(r1, MemOperand(sp));
5299 la(sp, MemOperand(sp, kPointerSize));
5300 #endif
5301 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5302 if (divisor > 0 && neg) {
5303 AddP(result, dividend);
5304 }
5305 if (divisor < 0 && !neg && mag.multiplier > 0) {
5306 SubP(result, dividend);
5307 }
5308 if (mag.shift > 0) ShiftRightArith(result, result, Operand(mag.shift));
5309 ExtractBit(r0, dividend, 31);
5310 AddP(result, r0);
5311 }
5312
5313 } // namespace internal
5314 } // namespace v8
5315
5316 #endif // V8_TARGET_ARCH_S390
5317