1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_X64
6
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/counters.h"
12 #include "src/debug/debug.h"
13 #include "src/heap/heap-inl.h"
14 #include "src/objects-inl.h"
15 #include "src/register-configuration.h"
16 #include "src/x64/assembler-x64.h"
17
18 #include "src/x64/macro-assembler-x64.h" // Cannot be the first include.
19
20 namespace v8 {
21 namespace internal {
22
MacroAssembler(Isolate * arg_isolate,void * buffer,int size,CodeObjectRequired create_code_object)23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
24 CodeObjectRequired create_code_object)
25 : Assembler(arg_isolate, buffer, size),
26 generating_stub_(false),
27 has_frame_(false),
28 root_array_available_(true) {
29 if (create_code_object == CodeObjectRequired::kYes) {
30 code_object_ =
31 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
32 }
33 }
34
35
36 static const int64_t kInvalidRootRegisterDelta = -1;
37
38
RootRegisterDelta(ExternalReference other)39 int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
40 if (predictable_code_size() &&
41 (other.address() < reinterpret_cast<Address>(isolate()) ||
42 other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
43 return kInvalidRootRegisterDelta;
44 }
45 Address roots_register_value = kRootRegisterBias +
46 reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
47
48 int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
49 if (kPointerSize == kInt64Size) {
50 delta = other.address() - roots_register_value;
51 } else {
52 // For x32, zero extend the address to 64-bit and calculate the delta.
53 uint64_t o = static_cast<uint32_t>(
54 reinterpret_cast<intptr_t>(other.address()));
55 uint64_t r = static_cast<uint32_t>(
56 reinterpret_cast<intptr_t>(roots_register_value));
57 delta = o - r;
58 }
59 return delta;
60 }
61
62
ExternalOperand(ExternalReference target,Register scratch)63 Operand MacroAssembler::ExternalOperand(ExternalReference target,
64 Register scratch) {
65 if (root_array_available_ && !serializer_enabled()) {
66 int64_t delta = RootRegisterDelta(target);
67 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
68 return Operand(kRootRegister, static_cast<int32_t>(delta));
69 }
70 }
71 Move(scratch, target);
72 return Operand(scratch, 0);
73 }
74
75
Load(Register destination,ExternalReference source)76 void MacroAssembler::Load(Register destination, ExternalReference source) {
77 if (root_array_available_ && !serializer_enabled()) {
78 int64_t delta = RootRegisterDelta(source);
79 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
80 movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
81 return;
82 }
83 }
84 // Safe code.
85 if (destination.is(rax)) {
86 load_rax(source);
87 } else {
88 Move(kScratchRegister, source);
89 movp(destination, Operand(kScratchRegister, 0));
90 }
91 }
92
93
Store(ExternalReference destination,Register source)94 void MacroAssembler::Store(ExternalReference destination, Register source) {
95 if (root_array_available_ && !serializer_enabled()) {
96 int64_t delta = RootRegisterDelta(destination);
97 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
98 movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
99 return;
100 }
101 }
102 // Safe code.
103 if (source.is(rax)) {
104 store_rax(destination);
105 } else {
106 Move(kScratchRegister, destination);
107 movp(Operand(kScratchRegister, 0), source);
108 }
109 }
110
111
LoadAddress(Register destination,ExternalReference source)112 void MacroAssembler::LoadAddress(Register destination,
113 ExternalReference source) {
114 if (root_array_available_ && !serializer_enabled()) {
115 int64_t delta = RootRegisterDelta(source);
116 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
117 leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
118 return;
119 }
120 }
121 // Safe code.
122 Move(destination, source);
123 }
124
125
LoadAddressSize(ExternalReference source)126 int MacroAssembler::LoadAddressSize(ExternalReference source) {
127 if (root_array_available_ && !serializer_enabled()) {
128 // This calculation depends on the internals of LoadAddress.
129 // It's correctness is ensured by the asserts in the Call
130 // instruction below.
131 int64_t delta = RootRegisterDelta(source);
132 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
133 // Operand is leap(scratch, Operand(kRootRegister, delta));
134 // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
135 int size = 4;
136 if (!is_int8(static_cast<int32_t>(delta))) {
137 size += 3; // Need full four-byte displacement in lea.
138 }
139 return size;
140 }
141 }
142 // Size of movp(destination, src);
143 return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
144 }
145
146
PushAddress(ExternalReference source)147 void MacroAssembler::PushAddress(ExternalReference source) {
148 int64_t address = reinterpret_cast<int64_t>(source.address());
149 if (is_int32(address) && !serializer_enabled()) {
150 if (emit_debug_code()) {
151 Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
152 }
153 Push(Immediate(static_cast<int32_t>(address)));
154 return;
155 }
156 LoadAddress(kScratchRegister, source);
157 Push(kScratchRegister);
158 }
159
160
LoadRoot(Register destination,Heap::RootListIndex index)161 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
162 DCHECK(root_array_available_);
163 movp(destination, Operand(kRootRegister,
164 (index << kPointerSizeLog2) - kRootRegisterBias));
165 }
166
167
LoadRootIndexed(Register destination,Register variable_offset,int fixed_offset)168 void MacroAssembler::LoadRootIndexed(Register destination,
169 Register variable_offset,
170 int fixed_offset) {
171 DCHECK(root_array_available_);
172 movp(destination,
173 Operand(kRootRegister,
174 variable_offset, times_pointer_size,
175 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
176 }
177
178
StoreRoot(Register source,Heap::RootListIndex index)179 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
180 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
181 DCHECK(root_array_available_);
182 movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
183 source);
184 }
185
186
PushRoot(Heap::RootListIndex index)187 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
188 DCHECK(root_array_available_);
189 Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
190 }
191
192
CompareRoot(Register with,Heap::RootListIndex index)193 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
194 DCHECK(root_array_available_);
195 cmpp(with, Operand(kRootRegister,
196 (index << kPointerSizeLog2) - kRootRegisterBias));
197 }
198
199
CompareRoot(const Operand & with,Heap::RootListIndex index)200 void MacroAssembler::CompareRoot(const Operand& with,
201 Heap::RootListIndex index) {
202 DCHECK(root_array_available_);
203 DCHECK(!with.AddressUsesRegister(kScratchRegister));
204 LoadRoot(kScratchRegister, index);
205 cmpp(with, kScratchRegister);
206 }
207
208
RememberedSetHelper(Register object,Register addr,Register scratch,SaveFPRegsMode save_fp,RememberedSetFinalAction and_then)209 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
210 Register addr,
211 Register scratch,
212 SaveFPRegsMode save_fp,
213 RememberedSetFinalAction and_then) {
214 if (emit_debug_code()) {
215 Label ok;
216 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
217 int3();
218 bind(&ok);
219 }
220 // Load store buffer top.
221 ExternalReference store_buffer =
222 ExternalReference::store_buffer_top(isolate());
223 movp(scratch, ExternalOperand(store_buffer));
224 // Store pointer to buffer.
225 movp(Operand(scratch, 0), addr);
226 // Increment buffer top.
227 addp(scratch, Immediate(kPointerSize));
228 // Write back new top of buffer.
229 movp(ExternalOperand(store_buffer), scratch);
230 // Call stub on end of buffer.
231 Label done;
232 // Check for end of buffer.
233 testp(scratch, Immediate(StoreBuffer::kStoreBufferMask));
234 if (and_then == kReturnAtEnd) {
235 Label buffer_overflowed;
236 j(equal, &buffer_overflowed, Label::kNear);
237 ret(0);
238 bind(&buffer_overflowed);
239 } else {
240 DCHECK(and_then == kFallThroughAtEnd);
241 j(not_equal, &done, Label::kNear);
242 }
243 StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
244 CallStub(&store_buffer_overflow);
245 if (and_then == kReturnAtEnd) {
246 ret(0);
247 } else {
248 DCHECK(and_then == kFallThroughAtEnd);
249 bind(&done);
250 }
251 }
252
253
InNewSpace(Register object,Register scratch,Condition cc,Label * branch,Label::Distance distance)254 void MacroAssembler::InNewSpace(Register object,
255 Register scratch,
256 Condition cc,
257 Label* branch,
258 Label::Distance distance) {
259 CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch,
260 distance);
261 }
262
263
RecordWriteField(Register object,int offset,Register value,Register dst,SaveFPRegsMode save_fp,RememberedSetAction remembered_set_action,SmiCheck smi_check,PointersToHereCheck pointers_to_here_check_for_value)264 void MacroAssembler::RecordWriteField(
265 Register object,
266 int offset,
267 Register value,
268 Register dst,
269 SaveFPRegsMode save_fp,
270 RememberedSetAction remembered_set_action,
271 SmiCheck smi_check,
272 PointersToHereCheck pointers_to_here_check_for_value) {
273 // First, check if a write barrier is even needed. The tests below
274 // catch stores of Smis.
275 Label done;
276
277 // Skip barrier if writing a smi.
278 if (smi_check == INLINE_SMI_CHECK) {
279 JumpIfSmi(value, &done);
280 }
281
282 // Although the object register is tagged, the offset is relative to the start
283 // of the object, so so offset must be a multiple of kPointerSize.
284 DCHECK(IsAligned(offset, kPointerSize));
285
286 leap(dst, FieldOperand(object, offset));
287 if (emit_debug_code()) {
288 Label ok;
289 testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
290 j(zero, &ok, Label::kNear);
291 int3();
292 bind(&ok);
293 }
294
295 RecordWrite(object, dst, value, save_fp, remembered_set_action,
296 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
297
298 bind(&done);
299
300 // Clobber clobbered input registers when running with the debug-code flag
301 // turned on to provoke errors.
302 if (emit_debug_code()) {
303 Move(value, kZapValue, Assembler::RelocInfoNone());
304 Move(dst, kZapValue, Assembler::RelocInfoNone());
305 }
306 }
307
308
RecordWriteArray(Register object,Register value,Register index,SaveFPRegsMode save_fp,RememberedSetAction remembered_set_action,SmiCheck smi_check,PointersToHereCheck pointers_to_here_check_for_value)309 void MacroAssembler::RecordWriteArray(
310 Register object,
311 Register value,
312 Register index,
313 SaveFPRegsMode save_fp,
314 RememberedSetAction remembered_set_action,
315 SmiCheck smi_check,
316 PointersToHereCheck pointers_to_here_check_for_value) {
317 // First, check if a write barrier is even needed. The tests below
318 // catch stores of Smis.
319 Label done;
320
321 // Skip barrier if writing a smi.
322 if (smi_check == INLINE_SMI_CHECK) {
323 JumpIfSmi(value, &done);
324 }
325
326 // Array access: calculate the destination address. Index is not a smi.
327 Register dst = index;
328 leap(dst, Operand(object, index, times_pointer_size,
329 FixedArray::kHeaderSize - kHeapObjectTag));
330
331 RecordWrite(object, dst, value, save_fp, remembered_set_action,
332 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
333
334 bind(&done);
335
336 // Clobber clobbered input registers when running with the debug-code flag
337 // turned on to provoke errors.
338 if (emit_debug_code()) {
339 Move(value, kZapValue, Assembler::RelocInfoNone());
340 Move(index, kZapValue, Assembler::RelocInfoNone());
341 }
342 }
343
344
RecordWriteForMap(Register object,Register map,Register dst,SaveFPRegsMode fp_mode)345 void MacroAssembler::RecordWriteForMap(Register object,
346 Register map,
347 Register dst,
348 SaveFPRegsMode fp_mode) {
349 DCHECK(!object.is(kScratchRegister));
350 DCHECK(!object.is(map));
351 DCHECK(!object.is(dst));
352 DCHECK(!map.is(dst));
353 AssertNotSmi(object);
354
355 if (emit_debug_code()) {
356 Label ok;
357 if (map.is(kScratchRegister)) pushq(map);
358 CompareMap(map, isolate()->factory()->meta_map());
359 if (map.is(kScratchRegister)) popq(map);
360 j(equal, &ok, Label::kNear);
361 int3();
362 bind(&ok);
363 }
364
365 if (!FLAG_incremental_marking) {
366 return;
367 }
368
369 if (emit_debug_code()) {
370 Label ok;
371 if (map.is(kScratchRegister)) pushq(map);
372 cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
373 if (map.is(kScratchRegister)) popq(map);
374 j(equal, &ok, Label::kNear);
375 int3();
376 bind(&ok);
377 }
378
379 // Compute the address.
380 leap(dst, FieldOperand(object, HeapObject::kMapOffset));
381
382 // First, check if a write barrier is even needed. The tests below
383 // catch stores of smis and stores into the young generation.
384 Label done;
385
386 // A single check of the map's pages interesting flag suffices, since it is
387 // only set during incremental collection, and then it's also guaranteed that
388 // the from object's page's interesting flag is also set. This optimization
389 // relies on the fact that maps can never be in new space.
390 CheckPageFlag(map,
391 map, // Used as scratch.
392 MemoryChunk::kPointersToHereAreInterestingMask,
393 zero,
394 &done,
395 Label::kNear);
396
397 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
398 fp_mode);
399 CallStub(&stub);
400
401 bind(&done);
402
403 // Count number of write barriers in generated code.
404 isolate()->counters()->write_barriers_static()->Increment();
405 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
406
407 // Clobber clobbered registers when running with the debug-code flag
408 // turned on to provoke errors.
409 if (emit_debug_code()) {
410 Move(dst, kZapValue, Assembler::RelocInfoNone());
411 Move(map, kZapValue, Assembler::RelocInfoNone());
412 }
413 }
414
415
RecordWrite(Register object,Register address,Register value,SaveFPRegsMode fp_mode,RememberedSetAction remembered_set_action,SmiCheck smi_check,PointersToHereCheck pointers_to_here_check_for_value)416 void MacroAssembler::RecordWrite(
417 Register object,
418 Register address,
419 Register value,
420 SaveFPRegsMode fp_mode,
421 RememberedSetAction remembered_set_action,
422 SmiCheck smi_check,
423 PointersToHereCheck pointers_to_here_check_for_value) {
424 DCHECK(!object.is(value));
425 DCHECK(!object.is(address));
426 DCHECK(!value.is(address));
427 AssertNotSmi(object);
428
429 if (remembered_set_action == OMIT_REMEMBERED_SET &&
430 !FLAG_incremental_marking) {
431 return;
432 }
433
434 if (emit_debug_code()) {
435 Label ok;
436 cmpp(value, Operand(address, 0));
437 j(equal, &ok, Label::kNear);
438 int3();
439 bind(&ok);
440 }
441
442 // First, check if a write barrier is even needed. The tests below
443 // catch stores of smis and stores into the young generation.
444 Label done;
445
446 if (smi_check == INLINE_SMI_CHECK) {
447 // Skip barrier if writing a smi.
448 JumpIfSmi(value, &done);
449 }
450
451 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
452 CheckPageFlag(value,
453 value, // Used as scratch.
454 MemoryChunk::kPointersToHereAreInterestingMask,
455 zero,
456 &done,
457 Label::kNear);
458 }
459
460 CheckPageFlag(object,
461 value, // Used as scratch.
462 MemoryChunk::kPointersFromHereAreInterestingMask,
463 zero,
464 &done,
465 Label::kNear);
466
467 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
468 fp_mode);
469 CallStub(&stub);
470
471 bind(&done);
472
473 // Count number of write barriers in generated code.
474 isolate()->counters()->write_barriers_static()->Increment();
475 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
476
477 // Clobber clobbered registers when running with the debug-code flag
478 // turned on to provoke errors.
479 if (emit_debug_code()) {
480 Move(address, kZapValue, Assembler::RelocInfoNone());
481 Move(value, kZapValue, Assembler::RelocInfoNone());
482 }
483 }
484
RecordWriteCodeEntryField(Register js_function,Register code_entry,Register scratch)485 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
486 Register code_entry,
487 Register scratch) {
488 const int offset = JSFunction::kCodeEntryOffset;
489
490 // The input registers are fixed to make calling the C write barrier function
491 // easier.
492 DCHECK(js_function.is(rdi));
493 DCHECK(code_entry.is(rcx));
494 DCHECK(scratch.is(r15));
495
496 // Since a code entry (value) is always in old space, we don't need to update
497 // remembered set. If incremental marking is off, there is nothing for us to
498 // do.
499 if (!FLAG_incremental_marking) return;
500
501 AssertNotSmi(js_function);
502
503 if (emit_debug_code()) {
504 Label ok;
505 leap(scratch, FieldOperand(js_function, offset));
506 cmpp(code_entry, Operand(scratch, 0));
507 j(equal, &ok, Label::kNear);
508 int3();
509 bind(&ok);
510 }
511
512 // First, check if a write barrier is even needed. The tests below
513 // catch stores of Smis and stores into young gen.
514 Label done;
515
516 CheckPageFlag(code_entry, scratch,
517 MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
518 Label::kNear);
519 CheckPageFlag(js_function, scratch,
520 MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
521 Label::kNear);
522
523 // Save input registers.
524 Push(js_function);
525 Push(code_entry);
526
527 const Register dst = scratch;
528 leap(dst, FieldOperand(js_function, offset));
529
530 // Save caller-saved registers.
531 PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
532
533 int argument_count = 3;
534 PrepareCallCFunction(argument_count);
535
536 // Load the argument registers.
537 if (arg_reg_1.is(rcx)) {
538 // Windows calling convention.
539 DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
540
541 movp(arg_reg_1, js_function); // rcx gets rdi.
542 movp(arg_reg_2, dst); // rdx gets r15.
543 } else {
544 // AMD64 calling convention.
545 DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
546
547 // rdi is already loaded with js_function.
548 movp(arg_reg_2, dst); // rsi gets r15.
549 }
550 Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
551
552 {
553 AllowExternalCallThatCantCauseGC scope(this);
554 CallCFunction(
555 ExternalReference::incremental_marking_record_write_code_entry_function(
556 isolate()),
557 argument_count);
558 }
559
560 // Restore caller-saved registers.
561 PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
562
563 // Restore input registers.
564 Pop(code_entry);
565 Pop(js_function);
566
567 bind(&done);
568 }
569
Assert(Condition cc,BailoutReason reason)570 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
571 if (emit_debug_code()) Check(cc, reason);
572 }
573
574
AssertFastElements(Register elements)575 void MacroAssembler::AssertFastElements(Register elements) {
576 if (emit_debug_code()) {
577 Label ok;
578 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
579 Heap::kFixedArrayMapRootIndex);
580 j(equal, &ok, Label::kNear);
581 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
582 Heap::kFixedDoubleArrayMapRootIndex);
583 j(equal, &ok, Label::kNear);
584 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
585 Heap::kFixedCOWArrayMapRootIndex);
586 j(equal, &ok, Label::kNear);
587 Abort(kJSObjectWithFastElementsMapHasSlowElements);
588 bind(&ok);
589 }
590 }
591
592
Check(Condition cc,BailoutReason reason)593 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
594 Label L;
595 j(cc, &L, Label::kNear);
596 Abort(reason);
597 // Control will not return here.
598 bind(&L);
599 }
600
601
CheckStackAlignment()602 void MacroAssembler::CheckStackAlignment() {
603 int frame_alignment = base::OS::ActivationFrameAlignment();
604 int frame_alignment_mask = frame_alignment - 1;
605 if (frame_alignment > kPointerSize) {
606 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
607 Label alignment_as_expected;
608 testp(rsp, Immediate(frame_alignment_mask));
609 j(zero, &alignment_as_expected, Label::kNear);
610 // Abort if stack is not aligned.
611 int3();
612 bind(&alignment_as_expected);
613 }
614 }
615
616
NegativeZeroTest(Register result,Register op,Label * then_label)617 void MacroAssembler::NegativeZeroTest(Register result,
618 Register op,
619 Label* then_label) {
620 Label ok;
621 testl(result, result);
622 j(not_zero, &ok, Label::kNear);
623 testl(op, op);
624 j(sign, then_label);
625 bind(&ok);
626 }
627
628
Abort(BailoutReason reason)629 void MacroAssembler::Abort(BailoutReason reason) {
630 #ifdef DEBUG
631 const char* msg = GetBailoutReason(reason);
632 if (msg != NULL) {
633 RecordComment("Abort message: ");
634 RecordComment(msg);
635 }
636
637 if (FLAG_trap_on_abort) {
638 int3();
639 return;
640 }
641 #endif
642
643 // Check if Abort() has already been initialized.
644 DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
645
646 Move(rdx, Smi::FromInt(static_cast<int>(reason)));
647
648 if (!has_frame_) {
649 // We don't actually want to generate a pile of code for this, so just
650 // claim there is a stack frame, without generating one.
651 FrameScope scope(this, StackFrame::NONE);
652 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
653 } else {
654 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
655 }
656 // Control will not return here.
657 int3();
658 }
659
660
CallStub(CodeStub * stub,TypeFeedbackId ast_id)661 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
662 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
663 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
664 }
665
666
TailCallStub(CodeStub * stub)667 void MacroAssembler::TailCallStub(CodeStub* stub) {
668 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
669 }
670
671
StubReturn(int argc)672 void MacroAssembler::StubReturn(int argc) {
673 DCHECK(argc >= 1 && generating_stub());
674 ret((argc - 1) * kPointerSize);
675 }
676
677
AllowThisStubCall(CodeStub * stub)678 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
679 return has_frame_ || !stub->SometimesSetsUpAFrame();
680 }
681
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles)682 void MacroAssembler::CallRuntime(const Runtime::Function* f,
683 int num_arguments,
684 SaveFPRegsMode save_doubles) {
685 // If the expected number of arguments of the runtime function is
686 // constant, we check that the actual number of arguments match the
687 // expectation.
688 CHECK(f->nargs < 0 || f->nargs == num_arguments);
689
690 // TODO(1236192): Most runtime routines don't need the number of
691 // arguments passed in because it is constant. At some point we
692 // should remove this need and make the runtime routine entry code
693 // smarter.
694 Set(rax, num_arguments);
695 LoadAddress(rbx, ExternalReference(f, isolate()));
696 CEntryStub ces(isolate(), f->result_size, save_doubles);
697 CallStub(&ces);
698 }
699
700
CallExternalReference(const ExternalReference & ext,int num_arguments)701 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
702 int num_arguments) {
703 Set(rax, num_arguments);
704 LoadAddress(rbx, ext);
705
706 CEntryStub stub(isolate(), 1);
707 CallStub(&stub);
708 }
709
710
TailCallRuntime(Runtime::FunctionId fid)711 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
712 // ----------- S t a t e -------------
713 // -- rsp[0] : return address
714 // -- rsp[8] : argument num_arguments - 1
715 // ...
716 // -- rsp[8 * num_arguments] : argument 0 (receiver)
717 //
718 // For runtime functions with variable arguments:
719 // -- rax : number of arguments
720 // -----------------------------------
721
722 const Runtime::Function* function = Runtime::FunctionForId(fid);
723 DCHECK_EQ(1, function->result_size);
724 if (function->nargs >= 0) {
725 Set(rax, function->nargs);
726 }
727 JumpToExternalReference(ExternalReference(fid, isolate()));
728 }
729
JumpToExternalReference(const ExternalReference & ext,bool builtin_exit_frame)730 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
731 bool builtin_exit_frame) {
732 // Set the entry point and jump to the C entry runtime stub.
733 LoadAddress(rbx, ext);
734 CEntryStub ces(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
735 builtin_exit_frame);
736 jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
737 }
738
739 #define REG(Name) \
740 { Register::kCode_##Name }
741
742 static const Register saved_regs[] = {
743 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
744 REG(r9), REG(r10), REG(r11)
745 };
746
747 #undef REG
748
749 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
750
751
PushCallerSaved(SaveFPRegsMode fp_mode,Register exclusion1,Register exclusion2,Register exclusion3)752 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
753 Register exclusion1,
754 Register exclusion2,
755 Register exclusion3) {
756 // We don't allow a GC during a store buffer overflow so there is no need to
757 // store the registers in any particular way, but we do have to store and
758 // restore them.
759 for (int i = 0; i < kNumberOfSavedRegs; i++) {
760 Register reg = saved_regs[i];
761 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
762 pushq(reg);
763 }
764 }
765 // R12 to r15 are callee save on all platforms.
766 if (fp_mode == kSaveFPRegs) {
767 subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
768 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
769 XMMRegister reg = XMMRegister::from_code(i);
770 Movsd(Operand(rsp, i * kDoubleSize), reg);
771 }
772 }
773 }
774
775
PopCallerSaved(SaveFPRegsMode fp_mode,Register exclusion1,Register exclusion2,Register exclusion3)776 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
777 Register exclusion1,
778 Register exclusion2,
779 Register exclusion3) {
780 if (fp_mode == kSaveFPRegs) {
781 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
782 XMMRegister reg = XMMRegister::from_code(i);
783 Movsd(reg, Operand(rsp, i * kDoubleSize));
784 }
785 addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
786 }
787 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
788 Register reg = saved_regs[i];
789 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
790 popq(reg);
791 }
792 }
793 }
794
795
Cvtss2sd(XMMRegister dst,XMMRegister src)796 void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
797 if (CpuFeatures::IsSupported(AVX)) {
798 CpuFeatureScope scope(this, AVX);
799 vcvtss2sd(dst, src, src);
800 } else {
801 cvtss2sd(dst, src);
802 }
803 }
804
805
Cvtss2sd(XMMRegister dst,const Operand & src)806 void MacroAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
807 if (CpuFeatures::IsSupported(AVX)) {
808 CpuFeatureScope scope(this, AVX);
809 vcvtss2sd(dst, dst, src);
810 } else {
811 cvtss2sd(dst, src);
812 }
813 }
814
815
Cvtsd2ss(XMMRegister dst,XMMRegister src)816 void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
817 if (CpuFeatures::IsSupported(AVX)) {
818 CpuFeatureScope scope(this, AVX);
819 vcvtsd2ss(dst, src, src);
820 } else {
821 cvtsd2ss(dst, src);
822 }
823 }
824
825
Cvtsd2ss(XMMRegister dst,const Operand & src)826 void MacroAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
827 if (CpuFeatures::IsSupported(AVX)) {
828 CpuFeatureScope scope(this, AVX);
829 vcvtsd2ss(dst, dst, src);
830 } else {
831 cvtsd2ss(dst, src);
832 }
833 }
834
835
Cvtlsi2sd(XMMRegister dst,Register src)836 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
837 if (CpuFeatures::IsSupported(AVX)) {
838 CpuFeatureScope scope(this, AVX);
839 vxorpd(dst, dst, dst);
840 vcvtlsi2sd(dst, dst, src);
841 } else {
842 xorpd(dst, dst);
843 cvtlsi2sd(dst, src);
844 }
845 }
846
847
Cvtlsi2sd(XMMRegister dst,const Operand & src)848 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
849 if (CpuFeatures::IsSupported(AVX)) {
850 CpuFeatureScope scope(this, AVX);
851 vxorpd(dst, dst, dst);
852 vcvtlsi2sd(dst, dst, src);
853 } else {
854 xorpd(dst, dst);
855 cvtlsi2sd(dst, src);
856 }
857 }
858
859
Cvtlsi2ss(XMMRegister dst,Register src)860 void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
861 if (CpuFeatures::IsSupported(AVX)) {
862 CpuFeatureScope scope(this, AVX);
863 vxorps(dst, dst, dst);
864 vcvtlsi2ss(dst, dst, src);
865 } else {
866 xorps(dst, dst);
867 cvtlsi2ss(dst, src);
868 }
869 }
870
871
Cvtlsi2ss(XMMRegister dst,const Operand & src)872 void MacroAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
873 if (CpuFeatures::IsSupported(AVX)) {
874 CpuFeatureScope scope(this, AVX);
875 vxorps(dst, dst, dst);
876 vcvtlsi2ss(dst, dst, src);
877 } else {
878 xorps(dst, dst);
879 cvtlsi2ss(dst, src);
880 }
881 }
882
883
Cvtqsi2ss(XMMRegister dst,Register src)884 void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
885 if (CpuFeatures::IsSupported(AVX)) {
886 CpuFeatureScope scope(this, AVX);
887 vxorps(dst, dst, dst);
888 vcvtqsi2ss(dst, dst, src);
889 } else {
890 xorps(dst, dst);
891 cvtqsi2ss(dst, src);
892 }
893 }
894
895
Cvtqsi2ss(XMMRegister dst,const Operand & src)896 void MacroAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
897 if (CpuFeatures::IsSupported(AVX)) {
898 CpuFeatureScope scope(this, AVX);
899 vxorps(dst, dst, dst);
900 vcvtqsi2ss(dst, dst, src);
901 } else {
902 xorps(dst, dst);
903 cvtqsi2ss(dst, src);
904 }
905 }
906
907
Cvtqsi2sd(XMMRegister dst,Register src)908 void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
909 if (CpuFeatures::IsSupported(AVX)) {
910 CpuFeatureScope scope(this, AVX);
911 vxorpd(dst, dst, dst);
912 vcvtqsi2sd(dst, dst, src);
913 } else {
914 xorpd(dst, dst);
915 cvtqsi2sd(dst, src);
916 }
917 }
918
919
Cvtqsi2sd(XMMRegister dst,const Operand & src)920 void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
921 if (CpuFeatures::IsSupported(AVX)) {
922 CpuFeatureScope scope(this, AVX);
923 vxorpd(dst, dst, dst);
924 vcvtqsi2sd(dst, dst, src);
925 } else {
926 xorpd(dst, dst);
927 cvtqsi2sd(dst, src);
928 }
929 }
930
931
Cvtqui2ss(XMMRegister dst,Register src,Register tmp)932 void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
933 Label msb_set_src;
934 Label jmp_return;
935 testq(src, src);
936 j(sign, &msb_set_src, Label::kNear);
937 Cvtqsi2ss(dst, src);
938 jmp(&jmp_return, Label::kNear);
939 bind(&msb_set_src);
940 movq(tmp, src);
941 shrq(src, Immediate(1));
942 // Recover the least significant bit to avoid rounding errors.
943 andq(tmp, Immediate(1));
944 orq(src, tmp);
945 Cvtqsi2ss(dst, src);
946 addss(dst, dst);
947 bind(&jmp_return);
948 }
949
950
Cvtqui2sd(XMMRegister dst,Register src,Register tmp)951 void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
952 Label msb_set_src;
953 Label jmp_return;
954 testq(src, src);
955 j(sign, &msb_set_src, Label::kNear);
956 Cvtqsi2sd(dst, src);
957 jmp(&jmp_return, Label::kNear);
958 bind(&msb_set_src);
959 movq(tmp, src);
960 shrq(src, Immediate(1));
961 andq(tmp, Immediate(1));
962 orq(src, tmp);
963 Cvtqsi2sd(dst, src);
964 addsd(dst, dst);
965 bind(&jmp_return);
966 }
967
968
Cvtsd2si(Register dst,XMMRegister src)969 void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
970 if (CpuFeatures::IsSupported(AVX)) {
971 CpuFeatureScope scope(this, AVX);
972 vcvtsd2si(dst, src);
973 } else {
974 cvtsd2si(dst, src);
975 }
976 }
977
978
Cvttss2si(Register dst,XMMRegister src)979 void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
980 if (CpuFeatures::IsSupported(AVX)) {
981 CpuFeatureScope scope(this, AVX);
982 vcvttss2si(dst, src);
983 } else {
984 cvttss2si(dst, src);
985 }
986 }
987
988
Cvttss2si(Register dst,const Operand & src)989 void MacroAssembler::Cvttss2si(Register dst, const Operand& src) {
990 if (CpuFeatures::IsSupported(AVX)) {
991 CpuFeatureScope scope(this, AVX);
992 vcvttss2si(dst, src);
993 } else {
994 cvttss2si(dst, src);
995 }
996 }
997
998
Cvttsd2si(Register dst,XMMRegister src)999 void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
1000 if (CpuFeatures::IsSupported(AVX)) {
1001 CpuFeatureScope scope(this, AVX);
1002 vcvttsd2si(dst, src);
1003 } else {
1004 cvttsd2si(dst, src);
1005 }
1006 }
1007
1008
Cvttsd2si(Register dst,const Operand & src)1009 void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
1010 if (CpuFeatures::IsSupported(AVX)) {
1011 CpuFeatureScope scope(this, AVX);
1012 vcvttsd2si(dst, src);
1013 } else {
1014 cvttsd2si(dst, src);
1015 }
1016 }
1017
1018
Cvttss2siq(Register dst,XMMRegister src)1019 void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
1020 if (CpuFeatures::IsSupported(AVX)) {
1021 CpuFeatureScope scope(this, AVX);
1022 vcvttss2siq(dst, src);
1023 } else {
1024 cvttss2siq(dst, src);
1025 }
1026 }
1027
1028
Cvttss2siq(Register dst,const Operand & src)1029 void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
1030 if (CpuFeatures::IsSupported(AVX)) {
1031 CpuFeatureScope scope(this, AVX);
1032 vcvttss2siq(dst, src);
1033 } else {
1034 cvttss2siq(dst, src);
1035 }
1036 }
1037
1038
Cvttsd2siq(Register dst,XMMRegister src)1039 void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
1040 if (CpuFeatures::IsSupported(AVX)) {
1041 CpuFeatureScope scope(this, AVX);
1042 vcvttsd2siq(dst, src);
1043 } else {
1044 cvttsd2siq(dst, src);
1045 }
1046 }
1047
1048
Cvttsd2siq(Register dst,const Operand & src)1049 void MacroAssembler::Cvttsd2siq(Register dst, const Operand& src) {
1050 if (CpuFeatures::IsSupported(AVX)) {
1051 CpuFeatureScope scope(this, AVX);
1052 vcvttsd2siq(dst, src);
1053 } else {
1054 cvttsd2siq(dst, src);
1055 }
1056 }
1057
1058
// Load a value of representation |r| from |src| into |dst|, using the
// narrowest move that correctly sign- or zero-extends it.
void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    movsxbq(dst, src);   // signed byte: sign-extend to 64 bits.
  } else if (r.IsUInteger8()) {
    movzxbl(dst, src);   // unsigned byte: zero-extend (upper 32 cleared too).
  } else if (r.IsInteger16()) {
    movsxwq(dst, src);
  } else if (r.IsUInteger16()) {
    movzxwl(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);      // 32-bit move implicitly zeroes the upper half.
  } else {
    movp(dst, src);      // full pointer-sized load (tagged values etc.).
  }
}
1075
1076
// Store |src| to |dst| using the width implied by representation |r|.
// For tagged stores, verify in debug mode that the value matches the
// declared tag (smi vs. heap object).
void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    movb(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    movw(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    movp(dst, src);
  }
}
1094
1095
// Load the 64-bit constant |x| into |dst| with the shortest encoding:
// xor for zero, a 32-bit move when the value fits, otherwise a full
// 64-bit immediate move.
void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);  // Also clears the upper half; no flags dependency cost.
  } else if (is_uint32(x)) {
    // movl zero-extends, so any unsigned 32-bit value needs no REX.W.
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    // movq with imm32 sign-extends to 64 bits.
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x);  // Full 64-bit immediate.
  }
}
1107
// Store the pointer-sized constant |x| to memory at |dst|. On 64-bit
// targets a value that does not fit in a sign-extended imm32 must go
// through kScratchRegister.
void MacroAssembler::Set(const Operand& dst, intptr_t x) {
  if (kPointerSize == kInt64Size) {
    if (is_int32(x)) {
      movp(dst, Immediate(static_cast<int32_t>(x)));
    } else {
      Set(kScratchRegister, x);
      movp(dst, kScratchRegister);
    }
  } else {
    // 32-bit pointers (x32): the immediate always fits.
    movp(dst, Immediate(static_cast<int32_t>(x)));
  }
}
1120
1121
1122 // ----------------------------------------------------------------------------
1123 // Smi tagging, untagging and tag detection.
1124
IsUnsafeInt(const int32_t x)1125 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
1126 static const int kMaxBits = 17;
1127 return !is_intn(x, kMaxBits);
1128 }
1129
1130
// Move the smi constant |src| into |dst|, XOR-obfuscating wide values
// with the jit cookie so attacker-chosen constants never appear
// verbatim in the instruction stream (JIT-spray mitigation).
void MacroAssembler::SafeMove(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      // Un-obfuscate: (v ^ cookie) ^ cookie == v.
      xorp(dst, kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      // 31-bit smis fit in an imm32; obfuscate the raw tagged bits.
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      movp(dst, Immediate(value ^ jit_cookie()));
      xorp(dst, Immediate(jit_cookie()));
    }
  } else {
    Move(dst, src);
  }
}
1149
1150
// Push the smi constant |src|, using the same jit-cookie XOR
// obfuscation as SafeMove; the de-obfuscating XOR is applied directly
// to the pushed stack slot.
void MacroAssembler::SafePush(Smi* src) {
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Push(Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(Operand(rsp, 0), kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      Push(Immediate(value ^ jit_cookie()));
      xorp(Operand(rsp, 0), Immediate(jit_cookie()));
    }
  } else {
    Push(src);
  }
}
1168
1169
// Materialize the smi constant |source| in kScratchRegister and return
// that register. Zero gets the short xor encoding.
Register MacroAssembler::GetSmiConstant(Smi* source) {
  STATIC_ASSERT(kSmiTag == 0);
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}
1180
1181
// Load the smi constant |source| into |dst|; the tagged value is
// embedded with no relocation info (smis are immediates, not heap
// pointers).
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  STATIC_ASSERT(kSmiTag == 0);
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);  // Shortest encoding for the zero smi.
  } else {
    Move(dst, source, Assembler::RelocInfoNone());
  }
}
1191
1192
// Tag the 32-bit integer in |src| as a smi in |dst| by shifting it
// into the smi payload position.
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    // movl also zeroes the upper half before the pointer-width shift.
    movl(dst, src);
  }
  shlp(dst, Immediate(kSmiShift));
}
1200
1201
// Store the 32-bit integer |src| as a smi into the field at |dst|.
// With 32-bit smi payloads only the high half of the field needs to be
// written; the low (tag) half is assumed to already be zero, which the
// debug check below verifies.
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    // The field must currently hold a smi (tag bit clear), otherwise a
    // partial write would corrupt a heap pointer.
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
    bind(&ok);
  }

  if (SmiValuesAre32Bits()) {
    DCHECK(kSmiShift % kBitsPerByte == 0);
    // Write only the payload half of the field.
    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    // 31-bit smis share a word with the tag; build the tagged value
    // in scratch and store the whole field.
    Integer32ToSmi(kScratchRegister, src);
    movp(dst, kScratchRegister);
  }
}
1220
1221
// Compute the smi for (src + constant): add in 32 bits, then tag by
// shifting. The 32-bit add/lea also discards any upper garbage before
// tagging.
void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    // lea performs the add and the move in one instruction.
    leal(dst, Operand(src, constant));
  }
  shlp(dst, Immediate(kSmiShift));
}
1232
1233
// Untag the smi in |src| into a 32-bit integer in |dst|.
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }

  if (SmiValuesAre32Bits()) {
    // Payload lives in the upper 32 bits; a logical shift brings it
    // down and leaves the upper half zero.
    shrp(dst, Immediate(kSmiShift));
  } else {
    DCHECK(SmiValuesAre31Bits());
    // 31-bit payload: arithmetic shift preserves the sign.
    sarl(dst, Immediate(kSmiShift));
  }
}
1247
1248
// Untag the smi stored at |src| into a 32-bit integer in |dst|.
void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    // Read only the payload half of the field directly.
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(dst, src);
    sarl(dst, Immediate(kSmiShift));
  }
}
1258
1259
// Untag the smi in |src| into a sign-extended 64-bit integer in |dst|.
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }
  sarp(dst, Immediate(kSmiShift));
  if (kPointerSize == kInt32Size) {
    // Sign extend to 64-bit.
    movsxlq(dst, dst);
  }
}
1271
1272
// Untag the smi stored at |src| into a sign-extended 64-bit integer.
void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    // Load the 32-bit payload half and sign-extend in one instruction.
    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movp(dst, src);
    SmiToInteger64(dst, dst);
  }
}
1282
1283
// Set flags from |src| (zero/sign of the tagged value); |src| must be
// a smi, which is asserted in debug builds.
void MacroAssembler::SmiTest(Register src) {
  AssertSmi(src);
  testp(src, src);
}
1288
1289
// Compare two smi registers; tagged comparison preserves the order of
// the untagged values, so the resulting flags can be used directly.
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmpp(smi1, smi2);
}
1295
1296
// Compare the smi in |dst| against the smi constant |src|.
void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  AssertSmi(dst);
  Cmp(dst, src);
}
1301
1302
// Compare |dst| against the smi constant |src|; comparing against the
// zero smi uses the shorter test encoding.
void MacroAssembler::Cmp(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testp(dst, dst);
  } else {
    // Materializes the constant in kScratchRegister.
    Register constant_reg = GetSmiConstant(src);
    cmpp(dst, constant_reg);
  }
}
1312
1313
// Compare the smi in |dst| against the smi stored at |src|.
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}
1319
1320
// Compare the smi stored at |dst| against the smi in |src|.
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}
1326
1327
// Compare the smi stored at |dst| against the smi constant |src|.
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  AssertSmi(dst);
  if (SmiValuesAre32Bits()) {
    // Compare only the 32-bit payload half against the untagged value.
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    // The whole tagged value fits in an imm32.
    cmpl(dst, Immediate(src));
  }
}
1337
1338
// Compare the value stored at |dst| against the smi constant |src|.
void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  DCHECK(!dst.AddressUsesRegister(smi_reg));
  cmpp(dst, smi_reg);
}
1345
1346
// Compare the smi stored at |dst| against the untagged 32-bit integer
// in |src|.
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  if (SmiValuesAre32Bits()) {
    // Compare the payload half directly; no untagging needed.
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    // Untag into scratch first.
    SmiToInteger32(kScratchRegister, dst);
    cmpl(kScratchRegister, src);
  }
}
1356
1357
// Compute (untagged value of |src|) << |power| into |dst| for a
// non-negative smi, folding the untag shift and the scale shift into
// one net shift.
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  DCHECK(power >= 0);
  DCHECK(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movp(dst, src);
  }
  if (power < kSmiShift) {
    // Net right shift: untag dominates the scale.
    sarp(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    // Net left shift: scale dominates the untag.
    shlp(dst, Immediate(power - kSmiShift));
  }
  // power == kSmiShift: the shifts cancel, nothing to emit.
}
1376
1377
// Compute (untagged value of |src|) >> |power| into |dst| for a
// non-negative smi, combining the untag and the division shifts.
// Only the in-place (dst == src) form is implemented.
void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  DCHECK((0 <= power) && (power < 32));
  if (dst.is(src)) {
    // Logical shift is fine because the smi is non-negative.
    shrp(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}
1388
1389
// dst = src1 | src2 if both are smis; otherwise jump to |on_not_smis|
// with src1/src2 unmodified. The OR of two smis is a smi (tag bits
// stay zero), and a set tag bit in the OR proves a non-smi input.
void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    // Build the result in scratch so the sources survive a bailout.
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    movp(kScratchRegister, src1);
    orp(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movp(dst, kScratchRegister);
  } else {
    movp(dst, src1);
    orp(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}
1406
1407
// Test the smi tag bit of |src|; returns the condition that holds when
// |src| is a smi.
Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}
1413
1414
// Memory-operand variant: test the smi tag bit of the value at |src|.
Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}
1420
1421
// Returns the condition that holds when |src| is a smi with a
// non-negative value: rotate the sign bit down next to the tag bit and
// test both at once.
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movp(kScratchRegister, src);
  rolp(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}
1430
1431
// Returns the condition that holds when both |first| and |second| are
// smis.
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  if (SmiValuesAre32Bits()) {
    // The low 32 bits of a 32-bit-payload smi are all zero, so the sum
    // of two smis has clear low tag bits; any heap-object tag shows up
    // in bits 0-1 of the sum.
    leal(kScratchRegister, Operand(first, second, times_1, 0));
    testb(kScratchRegister, Immediate(0x03));
  } else {
    DCHECK(SmiValuesAre31Bits());
    // OR the tag bits together and test the combined tag.
    movl(kScratchRegister, first);
    orl(kScratchRegister, second);
    testb(kScratchRegister, Immediate(kSmiTagMask));
  }
  return zero;
}
1448
1449
// Returns the condition that holds when both registers are smis with
// non-negative values: OR them so any set sign or tag bit survives,
// then use the rotate-and-test trick from CheckNonNegativeSmi.
Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                  Register second) {
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  }
  movp(kScratchRegister, first);
  orp(kScratchRegister, second);
  rolp(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));
  return zero;
}
1461
1462
// Returns the condition that holds when at least one of |first| and
// |second| is a smi: the AND of the two values has a clear tag bit iff
// either tag bit is clear.
Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}
1480
1481
// Returns the condition that holds when the 32-bit integer in |src|
// fits in a smi.
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // A 32-bit integer value can always be converted to a smi.
    return always;
  } else {
    DCHECK(SmiValuesAre31Bits());
    // src - 0xc0000000 == src + 2^30 (mod 2^32); the result is
    // non-negative exactly when src is in [-2^30, 2^30).
    cmpl(src, Immediate(0xc0000000));
    return positive;
  }
}
1492
1493
// Returns the condition that holds when the unsigned 32-bit integer in
// |src| fits in a smi.
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // An unsigned 32-bit integer value is valid as long as the high bit
    // is not set.
    testl(src, src);
    return positive;
  } else {
    DCHECK(SmiValuesAre31Bits());
    // Valid unsigned 31-bit smi values have the top two bits clear.
    testl(src, Immediate(0xc0000000));
    return zero;
  }
}
1506
1507
// dst = src & kSmiTagMask: 0 when |src| is a smi, non-zero otherwise.
void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
  if (dst.is(src)) {
    andl(dst, Immediate(kSmiTagMask));
  } else {
    // Load the mask first so |src| is read, not clobbered.
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  }
}
1516
1517
// Memory-operand variant: dst = *src & kSmiTagMask. The order of the
// two instructions depends on whether |dst| is part of the address.
void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  } else {
    // |dst| feeds the address: load the value before overwriting it.
    movl(dst, src);
    andl(dst, Immediate(kSmiTagMask));
  }
}
1527
1528
JumpIfValidSmiValue(Register src,Label * on_valid,Label::Distance near_jump)1529 void MacroAssembler::JumpIfValidSmiValue(Register src,
1530 Label* on_valid,
1531 Label::Distance near_jump) {
1532 Condition is_valid = CheckInteger32ValidSmiValue(src);
1533 j(is_valid, on_valid, near_jump);
1534 }
1535
1536
JumpIfNotValidSmiValue(Register src,Label * on_invalid,Label::Distance near_jump)1537 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1538 Label* on_invalid,
1539 Label::Distance near_jump) {
1540 Condition is_valid = CheckInteger32ValidSmiValue(src);
1541 j(NegateCondition(is_valid), on_invalid, near_jump);
1542 }
1543
1544
JumpIfUIntValidSmiValue(Register src,Label * on_valid,Label::Distance near_jump)1545 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1546 Label* on_valid,
1547 Label::Distance near_jump) {
1548 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1549 j(is_valid, on_valid, near_jump);
1550 }
1551
1552
JumpIfUIntNotValidSmiValue(Register src,Label * on_invalid,Label::Distance near_jump)1553 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1554 Label* on_invalid,
1555 Label::Distance near_jump) {
1556 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1557 j(NegateCondition(is_valid), on_invalid, near_jump);
1558 }
1559
1560
JumpIfSmi(Register src,Label * on_smi,Label::Distance near_jump)1561 void MacroAssembler::JumpIfSmi(Register src,
1562 Label* on_smi,
1563 Label::Distance near_jump) {
1564 Condition smi = CheckSmi(src);
1565 j(smi, on_smi, near_jump);
1566 }
1567
1568
JumpIfNotSmi(Register src,Label * on_not_smi,Label::Distance near_jump)1569 void MacroAssembler::JumpIfNotSmi(Register src,
1570 Label* on_not_smi,
1571 Label::Distance near_jump) {
1572 Condition smi = CheckSmi(src);
1573 j(NegateCondition(smi), on_not_smi, near_jump);
1574 }
1575
JumpIfNotSmi(Operand src,Label * on_not_smi,Label::Distance near_jump)1576 void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
1577 Label::Distance near_jump) {
1578 Condition smi = CheckSmi(src);
1579 j(NegateCondition(smi), on_not_smi, near_jump);
1580 }
1581
JumpUnlessNonNegativeSmi(Register src,Label * on_not_smi_or_negative,Label::Distance near_jump)1582 void MacroAssembler::JumpUnlessNonNegativeSmi(
1583 Register src, Label* on_not_smi_or_negative,
1584 Label::Distance near_jump) {
1585 Condition non_negative_smi = CheckNonNegativeSmi(src);
1586 j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1587 }
1588
1589
// Jump to |on_equals| when the smi in |src| equals |constant|.
void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             Label* on_equals,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);
}
1597
1598
JumpIfNotBothSmi(Register src1,Register src2,Label * on_not_both_smi,Label::Distance near_jump)1599 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1600 Register src2,
1601 Label* on_not_both_smi,
1602 Label::Distance near_jump) {
1603 Condition both_smi = CheckBothSmi(src1, src2);
1604 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1605 }
1606
1607
JumpUnlessBothNonNegativeSmi(Register src1,Register src2,Label * on_not_both_smi,Label::Distance near_jump)1608 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1609 Register src2,
1610 Label* on_not_both_smi,
1611 Label::Distance near_jump) {
1612 Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1613 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1614 }
1615
1616
// dst = src + constant (smi arithmetic, no overflow check). Tagged
// addition is correct because the tag bits of both operands are zero.
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
    return;
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    // Constant goes through scratch; add into dst in place.
    Register constant_reg = GetSmiConstant(constant);
    addp(dst, constant_reg);
  } else {
    // Load the constant into dst, then add src (addition commutes).
    LoadSmiConstant(dst, constant);
    addp(dst, src);
  }
}
1632
1633
// Add the smi |constant| to the smi stored at |dst| in place (no
// overflow check).
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    if (SmiValuesAre32Bits()) {
      // Add the untagged value directly to the payload half.
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
    } else {
      DCHECK(SmiValuesAre31Bits());
      // The whole tagged constant fits in an imm32.
      addp(dst, Immediate(constant));
    }
  }
}
1645
1646
// dst = src + constant with overflow handling controlled by
// |constraints|: bail out on overflow or on no-overflow, optionally
// undoing the add first so |src| is preserved at the bailout site.
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
                                    SmiOperationConstraints constraints,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    addp(dst, kScratchRegister);
    if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
      // Caller wants the bailout when the add did NOT overflow; the
      // non-bailout (overflow) path must still see the original src.
      j(no_overflow, bailout_label, near_jump);
      DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
      subp(dst, kScratchRegister);
    } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
      if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
        // Undo the add before taking the bailout so src is intact.
        Label done;
        j(no_overflow, &done, Label::kNear);
        subp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bailout if overflow without reserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      UNREACHABLE();
    }
  } else {
    // dst != src: src is trivially preserved; only overflow-bailout is
    // supported here.
    DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
    DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
    LoadSmiConstant(dst, constant);
    addp(dst, src);
    j(overflow, bailout_label, near_jump);
  }
}
1685
1686
// dst = src - constant (smi arithmetic, no overflow check).
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subp(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      // -kMinValue is not representable, so negation is unavailable.
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result, it only
      // differs on the overflow bit, which we don't check here.
      addp(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addp(dst, src);
    }
  }
}
1709
1710
// dst = src - constant with overflow handling controlled by
// |constraints| (mirror of the overflow-checked SmiAddConstant).
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
                                    SmiOperationConstraints constraints,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    subp(dst, kScratchRegister);
    if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
      // Bail out when the subtraction did NOT overflow; the remaining
      // (overflow) path restores src by adding the constant back.
      j(no_overflow, bailout_label, near_jump);
      DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
      addp(dst, kScratchRegister);
    } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
      if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
        // Undo the subtraction before taking the bailout.
        Label done;
        j(no_overflow, &done, Label::kNear);
        addp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bailout if overflow without reserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      UNREACHABLE();
    }
  } else {
    DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
    DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
    if (constant->value() == Smi::kMinValue) {
      // -kMinValue is not representable: do a real subtraction through
      // scratch so the overflow flag is meaningful.
      DCHECK(!dst.is(kScratchRegister));
      movp(dst, src);
      LoadSmiConstant(kScratchRegister, constant);
      subp(dst, kScratchRegister);
      j(overflow, bailout_label, near_jump);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addp(dst, src);
      j(overflow, bailout_label, near_jump);
    }
  }
}
1758
1759
// dst = -src if the result is a valid smi; otherwise fall through with
// src unchanged. Negation fails for 0 (result would equal the input)
// and Smi::kMinValue (negation wraps back to itself), so comparing the
// negated value to the original catches exactly the failure cases.
void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    // Back up the original value before negating in place.
    movp(kScratchRegister, src);
    negp(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpp(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    // Failure: restore src from the backup.
    movp(src, kScratchRegister);
  } else {
    movp(dst, src);
    negp(dst);
    cmpp(dst, src);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result, near_jump);
  }
}
1780
1781
1782 template<class T>
// Shared body for the overflow-checked SmiAdd overloads; T is Register
// or Operand. On overflow, jumps to |on_not_smi_result| with src1
// restored (by undoing the add when dst aliases src1).
template<class T>
static void SmiAddHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->addp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->subp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    // dst is distinct, so src1 survives; bail out directly on overflow.
    masm->movp(dst, src1);
    masm->addp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}
1803
1804
// dst = src1 + src2 (smis); jumps to |on_not_smi_result| on overflow
// with the sources preserved. dst must not alias src2 so the undo in
// the helper is possible.
void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
1814
1815
// Memory-operand variant of the overflow-checked SmiAdd; dst must not
// be part of src2's address, which would be clobbered before the undo.
void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
1825
1826
// dst = src1 + src2 with no overflow check; debug builds verify that
// no overflow actually occurs.
void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      // Perform the add in scratch just to observe the overflow flag.
      movp(kScratchRegister, src1);
      addp(kScratchRegister, src2);
      Check(no_overflow, kSmiAdditionOverflow);
    }
    // lea adds without touching flags and writes dst in one step.
    leap(dst, Operand(src1, src2, times_1, 0));
  } else {
    addp(dst, src2);
    Assert(no_overflow, kSmiAdditionOverflow);
  }
}
1844
1845
1846 template<class T>
// Shared body for the overflow-checked SmiSub overloads; T is Register
// or Operand. On overflow, jumps to |on_not_smi_result| with src1
// restored (by undoing the subtraction when dst aliases src1).
template<class T>
static void SmiSubHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->subp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->addp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movp(dst, src1);
    masm->subp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}
1867
1868
// dst = src1 - src2 (smis); jumps to |on_not_smi_result| on overflow
// with the sources preserved. dst must not alias src2.
void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
1878
1879
// Memory-operand variant of the overflow-checked SmiSub; dst must not
// be part of src2's address.
void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
1889
1890
1891 template<class T>
// Shared body for the unchecked SmiSub overloads; debug builds assert
// that no overflow occurs.
template<class T>
static void SmiSubNoOverflowHelper(MacroAssembler* masm,
                                   Register dst,
                                   Register src1,
                                   T src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    masm->movp(dst, src1);
  }
  masm->subp(dst, src2);
  masm->Assert(no_overflow, kSmiSubtractionOverflow);
}
1904
1905
// dst = src1 - src2 with no overflow check; dst must not alias src2.
void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  DCHECK(!dst.is(src2));
  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}
1910
1911
// Memory-operand variant of the unchecked SmiSub.
void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2) {
  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}
1917
1918
// dst = src1 * src2 (smi result) or jump to |on_not_smi_result|.
// One operand is untagged before the multiply so the product carries a
// single tag. The zero product needs special care: JavaScript
// distinguishes +0 from -0, and a zero smi cannot represent -0, so a
// zero product with a negative operand must take the slow path.
void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movp(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);     // Untag one factor; product stays tagged.
    imulp(dst, src2);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);

    // Product is zero: the sign of src1 XOR src2 is the sign of the
    // exact result; negative means -0, which is not a smi.
    movp(dst, kScratchRegister);
    xorp(dst, src2);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imulp(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero, the check whether the other is
    // negative.
    movp(kScratchRegister, src1);
    xorp(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
  }
}
1972
1973
// Divides smi src1 by smi src2, storing the smi-tagged quotient in dst.
// Jumps to on_not_smi_result when the result is not a smi: zero divisor,
// Smi::kMinValue divided by a negative value (idiv would fault on
// kMinValue / -1), negative zero, or a non-zero remainder. Uses rax/rdx
// as required by idivl; kScratchRegister backs up src1 when src1 is rax so
// the bail-out paths can leave src1 unchanged.
void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    // SmiToInteger32 below overwrites rax; keep the tagged value so the
    // slow paths can restore src1.
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  // Zero iff rax is 0 or Smi::kMinValue (no bits outside the min-value
  // pattern are set).
  testl(rax, Immediate(~Smi::kMinValue));
  j(not_zero, &safe_div, Label::kNear);
  testp(src2, src2);
  if (src1.is(rax)) {
    // Must restore src1 (== rax) from the backup before bailing out.
    j(positive, &safe_div, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
  } else {
    j(negative, on_not_smi_result, near_jump);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  cdq();
  idivl(src2);
  // src2 is an input register: re-tag it after the division.
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result, near_jump);
  }
  // If dst aliases src1 (== rax) the quotient overwrites it below;
  // otherwise restore the original tagged src1.
  if (!dst.is(src1) && src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}
2035
2036
// Computes src1 % src2 for smis, storing the smi-tagged remainder in dst.
// Jumps to on_not_smi_result when the result cannot be a smi: zero divisor,
// Smi::kMinValue % -1 (idiv would fault), or a zero remainder with a
// negative dividend (the true result is floating-point -0). Uses rax/rdx
// as required by idivl; kScratchRegister backs up src1 when src1 is rax.
void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));
  DCHECK(!src1.is(src2));

  // Zero divisor: result is NaN, not a smi.
  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    // SmiToInteger32 below overwrites rax; keep the tagged value so src1
    // can be restored on all exits.
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  Label safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag inputs and go slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  jmp(on_not_smi_result, near_jump);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  // Check for a negative zero result.  If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  Label smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result, Label::kNear);
  testp(src1, src1);
  j(negative, on_not_smi_result, near_jump);
  bind(&smi_result);
  // idivl leaves the remainder in rdx.
  Integer32ToSmi(dst, rdx);
}
2091
2092
// Computes the bitwise NOT of the smi value in src, producing a valid smi
// in dst. Trick: first set exactly the tag (and, for 32-bit smis, padding)
// bits, then complement the whole register — the pre-set bits come out as
// zero, leaving a correctly tagged result.
void MacroAssembler::SmiNot(Register dst, Register src) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src.is(kScratchRegister));
  if (SmiValuesAre32Bits()) {
    // Set tag and padding bits before negating, so that they are zero
    // afterwards.
    // movl zero-extends, so kScratchRegister == 0x00000000FFFFFFFF.
    movl(kScratchRegister, Immediate(~0));
  } else {
    DCHECK(SmiValuesAre31Bits());
    // Only the tag bit (bit 0) needs to be pre-set.
    movl(kScratchRegister, Immediate(1));
  }
  if (dst.is(src)) {
    xorp(dst, kScratchRegister);
  } else {
    // The low (tag/padding) bits of a valid smi are zero, so the addition
    // performed by lea cannot carry into the value bits; it is equivalent
    // to the xor above but leaves src untouched.
    leap(dst, Operand(src, kScratchRegister, times_1, 0));
  }
  notp(dst);
}
2111
2112
SmiAnd(Register dst,Register src1,Register src2)2113 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
2114 DCHECK(!dst.is(src2));
2115 if (!dst.is(src1)) {
2116 movp(dst, src1);
2117 }
2118 andp(dst, src2);
2119 }
2120
2121
SmiAndConstant(Register dst,Register src,Smi * constant)2122 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
2123 if (constant->value() == 0) {
2124 Set(dst, 0);
2125 } else if (dst.is(src)) {
2126 DCHECK(!dst.is(kScratchRegister));
2127 Register constant_reg = GetSmiConstant(constant);
2128 andp(dst, constant_reg);
2129 } else {
2130 LoadSmiConstant(dst, constant);
2131 andp(dst, src);
2132 }
2133 }
2134
2135
SmiOr(Register dst,Register src1,Register src2)2136 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
2137 if (!dst.is(src1)) {
2138 DCHECK(!src1.is(src2));
2139 movp(dst, src1);
2140 }
2141 orp(dst, src2);
2142 }
2143
2144
SmiOrConstant(Register dst,Register src,Smi * constant)2145 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
2146 if (dst.is(src)) {
2147 DCHECK(!dst.is(kScratchRegister));
2148 Register constant_reg = GetSmiConstant(constant);
2149 orp(dst, constant_reg);
2150 } else {
2151 LoadSmiConstant(dst, constant);
2152 orp(dst, src);
2153 }
2154 }
2155
2156
SmiXor(Register dst,Register src1,Register src2)2157 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2158 if (!dst.is(src1)) {
2159 DCHECK(!src1.is(src2));
2160 movp(dst, src1);
2161 }
2162 xorp(dst, src2);
2163 }
2164
2165
SmiXorConstant(Register dst,Register src,Smi * constant)2166 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2167 if (dst.is(src)) {
2168 DCHECK(!dst.is(kScratchRegister));
2169 Register constant_reg = GetSmiConstant(constant);
2170 xorp(dst, constant_reg);
2171 } else {
2172 LoadSmiConstant(dst, constant);
2173 xorp(dst, src);
2174 }
2175 }
2176
2177
// Arithmetic right shift of a smi by a constant, in place (only the
// dst == src form is implemented). The combined sar untags and shifts in
// one instruction; the shl re-tags. The result of an arithmetic shift of a
// smi value is always a valid smi, so no bail-out label is needed.
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  DCHECK(is_uint5(shift_value));
  if (shift_value > 0) {
    if (dst.is(src)) {
      // Untag and shift together, then restore the tag.
      sarp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}
2191
2192
// Shifts the smi in src left by a constant, result in dst. The 32-bit-smi
// path shifts the tagged value directly and performs no overflow check;
// the 31-bit-smi path untags, shifts, and jumps to on_not_smi_result if
// the shifted value no longer fits in a smi.
void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
    if (shift_value > 0) {
      // Shift amount specified by lower 5 bits, not six as the shl opcode.
      shlq(dst, Immediate(shift_value & 0x1f));
    }
  } else {
    DCHECK(SmiValuesAre31Bits());
    if (dst.is(src)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      SmiToInteger32(dst, src);
      shll(dst, Immediate(shift_value));
      // Re-tag only if the shifted value is still in smi range.
      JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}
2218
2219
// Logical (unsigned) right shift of a smi by a constant, result in dst.
// Jumps to on_not_smi_result when the unsigned result does not fit in a
// smi. Only the dst != src form is implemented.
void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // Logic right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    if (shift_value == 0) {
      // A zero shift does not shrink the value, so a negative input would
      // become an unsigned value too large for a smi: reject it up front.
      testp(src, src);
      j(negative, on_not_smi_result, near_jump);
    }
    if (SmiValuesAre32Bits()) {
      // Untag and shift in one instruction, then restore the tag. With a
      // 32-bit payload the (shift_value > 0) result always fits.
      movp(dst, src);
      shrp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      DCHECK(SmiValuesAre31Bits());
      SmiToInteger32(dst, src);
      shrp(dst, Immediate(shift_value));
      // The unsigned result may exceed the 31-bit smi range.
      JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}
2244
2245
// Shifts smi src1 left by the smi shift count in src2, result in dst.
// Clobbers rcx (the cl shift-count register). On the 31-bit-smi path,
// kScratchRegister backs up rcx when an input aliases it, so the input can
// be restored before jumping to on_not_smi_result.
void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smi_result,
                                  Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    DCHECK(!dst.is(rcx));
    if (!dst.is(src1)) {
      movp(dst, src1);
    }
    // Untag shift amount.
    SmiToInteger32(rcx, src2);
    // Shift amount specified by lower 5 bits, not six as the shl opcode.
    andp(rcx, Immediate(0x1f));
    // Shifting the tagged value keeps the (zero) tag bits correct.
    shlq_cl(dst);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(!dst.is(kScratchRegister));
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    DCHECK(!dst.is(src2));
    DCHECK(!dst.is(rcx));

    if (src1.is(rcx) || src2.is(rcx)) {
      // rcx is clobbered by the shift count; keep the original input value.
      movq(kScratchRegister, rcx);
    }
    if (dst.is(src1)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      Label valid_result;
      SmiToInteger32(dst, src1);
      SmiToInteger32(rcx, src2);
      shll_cl(dst);
      JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
      // As src1 or src2 could not be dst, we do not need to restore them for
      // clobbering dst.
      if (src1.is(rcx) || src2.is(rcx)) {
        if (src1.is(rcx)) {
          movq(src1, kScratchRegister);
        } else {
          movq(src2, kScratchRegister);
        }
      }
      jmp(on_not_smi_result, near_jump);
      bind(&valid_result);
      Integer32ToSmi(dst, dst);
    }
  }
}
2295
2296
// Logical (unsigned) right shift of smi src1 by the smi count in src2,
// result in dst. Jumps to on_not_smi_result when the unsigned result does
// not fit in a smi. Clobbers rcx; kScratchRegister backs up rcx when an
// input aliases it so the input can be restored before bailing out.
void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(rcx));
  if (src1.is(rcx) || src2.is(rcx)) {
    // rcx is clobbered by the shift count; keep the original input value.
    movq(kScratchRegister, rcx);
  }
  if (dst.is(src1)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    Label valid_result;
    SmiToInteger32(dst, src1);
    SmiToInteger32(rcx, src2);
    shrl_cl(dst);
    // Check the *unsigned* value against the smi range.
    JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
    // As src1 or src2 could not be dst, we do not need to restore them for
    // clobbering dst.
    if (src1.is(rcx) || src2.is(rcx)) {
      if (src1.is(rcx)) {
        movq(src1, kScratchRegister);
      } else {
        movq(src2, kScratchRegister);
      }
    }
    jmp(on_not_smi_result, near_jump);
    bind(&valid_result);
    Integer32ToSmi(dst, dst);
  }
}
2332
2333
SmiShiftArithmeticRight(Register dst,Register src1,Register src2)2334 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2335 Register src1,
2336 Register src2) {
2337 DCHECK(!dst.is(kScratchRegister));
2338 DCHECK(!src1.is(kScratchRegister));
2339 DCHECK(!src2.is(kScratchRegister));
2340 DCHECK(!dst.is(rcx));
2341
2342 SmiToInteger32(rcx, src2);
2343 if (!dst.is(src1)) {
2344 movp(dst, src1);
2345 }
2346 SmiToInteger32(dst, dst);
2347 sarl_cl(dst);
2348 Integer32ToSmi(dst, dst);
2349 }
2350
2351
// Given two registers of which exactly one holds a smi, branchlessly
// selects the non-smi into dst. Jumps to on_not_smis when both are smis.
// Clobbers kScratchRegister. The select is done with a mask built from
// src1's tag bit: mask = (src1 & 1) - 1 is all-ones when src1 is a smi and
// all-zeros otherwise; dst = ((src1 ^ src2) & mask) ^ src1 then yields
// src2 or src1 respectively.
void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(src1));
  DCHECK(!dst.is(src2));
  // Both operands must not be smis.
#ifdef DEBUG
  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
  Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  movl(kScratchRegister, Immediate(kSmiTagMask));
  andp(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // If non-zero then both are smis.
  j(not_zero, on_not_smis, near_jump);

  // Exactly one operand is a smi.
  DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
  subp(kScratchRegister, Immediate(1));
  // If src1 is a smi, then scratch register all 1s, else it is all 0s.
  movp(dst, src1);
  xorp(dst, src2);
  andp(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xorp(dst, src1);
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
2387
2388
// Converts the smi in src into a SmiIndex — a (register, scale) pair usable
// in scaled addressing so that register * scale equals the smi's value
// times 2^shift. dst is clobbered and holds the returned register.
SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  if (SmiValuesAre32Bits()) {
    DCHECK(is_uint6(shift));
    // There is a possible optimization if shift is in the range 60-63, but that
    // will (and must) never happen.
    if (!dst.is(src)) {
      movp(dst, src);
    }
    // Adjust the tagged value (value << kSmiShift) directly to value << shift.
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    // We have to sign extend the index register to 64-bit as the SMI might
    // be negative.
    movsxlq(dst, dst);
    if (shift == times_1) {
      // No scale factor can absorb the tag; untag explicitly.
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    // The tag contributes one factor of two, so a hardware scale of
    // 2^(shift-1) applied to the tagged value gives value * 2^shift.
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}
2421
2422
// Like SmiToIndex, but negates the smi first, producing a (register, scale)
// pair representing -value * 2^shift. dst is clobbered and holds the
// returned register.
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  if (SmiValuesAre32Bits()) {
    // Register src holds a positive smi.
    DCHECK(is_uint6(shift));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negp(dst);
    // Adjust the tagged value (value << kSmiShift) to value << shift.
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negq(dst);
    if (shift == times_1) {
      // No scale factor can absorb the tag; untag explicitly.
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    // The tag supplies one factor of two; scale by 2^(shift-1).
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}
2453
2454
// Adds the untagged int32 value of the smi stored at src to dst (32-bit add).
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    DCHECK_EQ(0, kSmiShift % kBitsPerByte);
    // The value bits occupy the upper half of the field; read them directly
    // as a 32-bit integer by offsetting the operand, avoiding an untag.
    addl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, src);
    addl(dst, kScratchRegister);
  }
}
2465
2466
// Pushes a smi constant, choosing the shortest available instruction
// sequence for its bit pattern.
void MacroAssembler::Push(Smi* source) {
  intptr_t smi = reinterpret_cast<intptr_t>(source);
  // Fits in a 32-bit (sign-extended) immediate: a single push.
  if (is_int32(smi)) {
    Push(Immediate(static_cast<int32_t>(smi)));
    return;
  }
  // If all set bits fall inside a single byte, push zero and patch that
  // one byte of the pushed slot.
  int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
  int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
  if (first_byte_set == last_byte_set && kPointerSize == kInt64Size) {
    // This sequence has only 7 bytes, compared to the 12 bytes below.
    Push(Immediate(0));
    movb(Operand(rsp, first_byte_set),
         Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
    return;
  }
  // General case: materialize the constant in a register, then push it.
  Register constant = GetSmiConstant(source);
  Push(constant);
}
2485
2486
// Pushes the raw (possibly non-smi) value in src as two smis — the high
// bits first, then the low bits — so the value survives on a stack that
// must only contain tagged values. Clobbers src and scratch; reversed by
// PopRegisterAsTwoSmis.
void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
  DCHECK(!src.is(scratch));
  movp(scratch, src);
  // High bits.
  shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  shlp(src, Immediate(kSmiShift));
  Push(src);
  // Low bits.
  shlp(scratch, Immediate(kSmiShift));
  Push(scratch);
}
2498
2499
// Reassembles a raw value pushed by PushRegisterAsTwoSmis: pops the low-bits
// smi, then the high-bits smi, and recombines them into dst. Clobbers
// scratch.
void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
  DCHECK(!dst.is(scratch));
  Pop(scratch);
  // Low bits.
  shrp(scratch, Immediate(kSmiShift));
  Pop(dst);
  shrp(dst, Immediate(kSmiShift));
  // High bits.
  shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  orp(dst, scratch);
}
2511
2512
// Tests (testl) the smi field at src against the given smi constant,
// setting the flags for a subsequent conditional jump.
void MacroAssembler::Test(const Operand& src, Smi* source) {
  if (SmiValuesAre32Bits()) {
    // The value bits live in the upper 32 bits of the field; test them
    // directly against the untagged value.
    testl(Operand(src, kIntSize), Immediate(source->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    // A 31-bit smi's tagged representation fits in 32 bits; compare it
    // directly.
    testl(src, Immediate(source));
  }
}
2521
2522
2523 // ----------------------------------------------------------------------------
2524
2525
JumpIfNotString(Register object,Register object_map,Label * not_string,Label::Distance near_jump)2526 void MacroAssembler::JumpIfNotString(Register object,
2527 Register object_map,
2528 Label* not_string,
2529 Label::Distance near_jump) {
2530 Condition is_smi = CheckSmi(object);
2531 j(is_smi, not_string, near_jump);
2532 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2533 j(above_equal, not_string, near_jump);
2534 }
2535
2536
// Jumps to on_fail unless both objects are sequential one-byte strings.
// Clobbers scratch1/scratch2 (used for the instance types). The two masked
// instance types are packed into one register (scratch2 shifted up by 8)
// so a single compare checks both.
void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
    Register first_object, Register second_object, Register scratch1,
    Register scratch2, Label* on_fail, Label::Distance near_jump) {
  // Check that both objects are not smis.
  Condition either_smi = CheckEitherSmi(first_object, second_object);
  j(either_smi, on_fail, near_jump);

  // Load instance type for both strings.
  movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
  movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat one-byte strings.
  DCHECK(kNotStringTag != 0);
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;

  andl(scratch1, Immediate(kFlatOneByteStringMask));
  andl(scratch2, Immediate(kFlatOneByteStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  const int kShift = 8;
  // The mask must not overlap its shifted copy for the packing to be valid.
  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
  shlp(scratch2, Immediate(kShift));
  orp(scratch1, scratch2);
  cmpl(scratch1,
       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << kShift)));
  j(not_equal, on_fail, near_jump);
}
2568
// Jumps to on_fail unless both given instance types denote sequential
// one-byte strings. The inputs are raw instance-type values (not objects);
// scratch1/scratch2 are clobbered. The two masked types are combined with
// lea (scratch1 + scratch2 * 8) so a single compare checks both.
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first_object_instance_type, Register second_object_instance_type,
    Register scratch1, Register scratch2, Label* on_fail,
    Label::Distance near_jump) {
  // Load instance type for both strings.
  movp(scratch1, first_object_instance_type);
  movp(scratch2, second_object_instance_type);

  // Check that both are flat one-byte strings.
  DCHECK(kNotStringTag != 0);
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;

  andl(scratch1, Immediate(kFlatOneByteStringMask));
  andl(scratch2, Immediate(kFlatOneByteStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  // The mask must not overlap its copy shifted by 3 (the times_8 scale).
  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}
2593
2594
// Jumps to not_unique_name unless operand_or_register (a byte holding an
// instance type) denotes a unique name: an internalized string or a symbol.
template<class T>
static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
                                      T operand_or_register,
                                      Label* not_unique_name,
                                      Label::Distance distance) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  // Internalized strings have both the not-string and not-internalized
  // bits clear (both tags are zero per the assert above).
  masm->testb(operand_or_register,
              Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  masm->j(zero, &succeed, Label::kNear);
  // Otherwise the only remaining unique-name instance type is SYMBOL_TYPE.
  masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
  masm->j(not_equal, not_unique_name, distance);

  masm->bind(&succeed);
}
2610
2611
JumpIfNotUniqueNameInstanceType(Operand operand,Label * not_unique_name,Label::Distance distance)2612 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2613 Label* not_unique_name,
2614 Label::Distance distance) {
2615 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2616 }
2617
2618
JumpIfNotUniqueNameInstanceType(Register reg,Label * not_unique_name,Label::Distance distance)2619 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2620 Label* not_unique_name,
2621 Label::Distance distance) {
2622 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2623 }
2624
2625
Move(Register dst,Register src)2626 void MacroAssembler::Move(Register dst, Register src) {
2627 if (!dst.is(src)) {
2628 movp(dst, src);
2629 }
2630 }
2631
2632
Move(Register dst,Handle<Object> source)2633 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2634 AllowDeferredHandleDereference smi_check;
2635 if (source->IsSmi()) {
2636 Move(dst, Smi::cast(*source));
2637 } else {
2638 MoveHeapObject(dst, source);
2639 }
2640 }
2641
2642
Move(const Operand & dst,Handle<Object> source)2643 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2644 AllowDeferredHandleDereference smi_check;
2645 if (source->IsSmi()) {
2646 Move(dst, Smi::cast(*source));
2647 } else {
2648 MoveHeapObject(kScratchRegister, source);
2649 movp(dst, kScratchRegister);
2650 }
2651 }
2652
2653
// Loads a 32-bit constant into dst, using cheaper instruction sequences
// for special bit patterns (all zeros, all ones).
void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
  if (src == 0) {
    // Zeroing idiom: no load or immediate needed.
    Xorpd(dst, dst);
  } else {
    unsigned pop = base::bits::CountPopulation32(src);
    DCHECK_NE(0u, pop);
    if (pop == 32) {
      // All 32 bits set: pcmpeqd against itself yields all ones.
      // NOTE(review): this sets all 128 bits of dst, not just the low 32 —
      // callers appear to rely only on the low lane; confirm.
      Pcmpeqd(dst, dst);
    } else {
      // General case: stage through the scratch GPR (movl zero-extends).
      movl(kScratchRegister, Immediate(src));
      Movq(dst, kScratchRegister);
    }
  }
}
2668
2669
// Loads a 64-bit constant into dst, picking the cheapest sequence based on
// the constant's bit pattern: all zeros, all ones, a run of ones reaching
// either end (all-ones shifted), a 32-bit value, or the general case.
void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
  if (src == 0) {
    // Zeroing idiom.
    Xorpd(dst, dst);
  } else {
    unsigned nlz = base::bits::CountLeadingZeros64(src);
    unsigned ntz = base::bits::CountTrailingZeros64(src);
    unsigned pop = base::bits::CountPopulation64(src);
    DCHECK_NE(0u, pop);
    if (pop == 64) {
      // All bits set.
      Pcmpeqd(dst, dst);
    } else if (pop + ntz == 64) {
      // Contiguous ones at the top: all-ones shifted left by ntz.
      Pcmpeqd(dst, dst);
      Psllq(dst, ntz);
    } else if (pop + nlz == 64) {
      // Contiguous ones at the bottom: all-ones shifted right by nlz.
      Pcmpeqd(dst, dst);
      Psrlq(dst, nlz);
    } else {
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (upper == 0) {
        // Fits in 32 bits: reuse the 32-bit loader.
        Move(dst, lower);
      } else {
        // General case: stage the full 64 bits through the scratch GPR.
        movq(kScratchRegister, src);
        Movq(dst, kScratchRegister);
      }
    }
  }
}
2698
2699
Movaps(XMMRegister dst,XMMRegister src)2700 void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
2701 if (CpuFeatures::IsSupported(AVX)) {
2702 CpuFeatureScope scope(this, AVX);
2703 vmovaps(dst, src);
2704 } else {
2705 movaps(dst, src);
2706 }
2707 }
2708
Movups(XMMRegister dst,XMMRegister src)2709 void MacroAssembler::Movups(XMMRegister dst, XMMRegister src) {
2710 if (CpuFeatures::IsSupported(AVX)) {
2711 CpuFeatureScope scope(this, AVX);
2712 vmovups(dst, src);
2713 } else {
2714 movups(dst, src);
2715 }
2716 }
2717
Movups(XMMRegister dst,const Operand & src)2718 void MacroAssembler::Movups(XMMRegister dst, const Operand& src) {
2719 if (CpuFeatures::IsSupported(AVX)) {
2720 CpuFeatureScope scope(this, AVX);
2721 vmovups(dst, src);
2722 } else {
2723 movups(dst, src);
2724 }
2725 }
2726
Movups(const Operand & dst,XMMRegister src)2727 void MacroAssembler::Movups(const Operand& dst, XMMRegister src) {
2728 if (CpuFeatures::IsSupported(AVX)) {
2729 CpuFeatureScope scope(this, AVX);
2730 vmovups(dst, src);
2731 } else {
2732 movups(dst, src);
2733 }
2734 }
2735
Movapd(XMMRegister dst,XMMRegister src)2736 void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
2737 if (CpuFeatures::IsSupported(AVX)) {
2738 CpuFeatureScope scope(this, AVX);
2739 vmovapd(dst, src);
2740 } else {
2741 movapd(dst, src);
2742 }
2743 }
2744
Movupd(XMMRegister dst,const Operand & src)2745 void MacroAssembler::Movupd(XMMRegister dst, const Operand& src) {
2746 if (CpuFeatures::IsSupported(AVX)) {
2747 CpuFeatureScope scope(this, AVX);
2748 vmovupd(dst, src);
2749 } else {
2750 movupd(dst, src);
2751 }
2752 }
2753
Movupd(const Operand & dst,XMMRegister src)2754 void MacroAssembler::Movupd(const Operand& dst, XMMRegister src) {
2755 if (CpuFeatures::IsSupported(AVX)) {
2756 CpuFeatureScope scope(this, AVX);
2757 vmovupd(dst, src);
2758 } else {
2759 movupd(dst, src);
2760 }
2761 }
2762
Movsd(XMMRegister dst,XMMRegister src)2763 void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
2764 if (CpuFeatures::IsSupported(AVX)) {
2765 CpuFeatureScope scope(this, AVX);
2766 vmovsd(dst, dst, src);
2767 } else {
2768 movsd(dst, src);
2769 }
2770 }
2771
2772
Movsd(XMMRegister dst,const Operand & src)2773 void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
2774 if (CpuFeatures::IsSupported(AVX)) {
2775 CpuFeatureScope scope(this, AVX);
2776 vmovsd(dst, src);
2777 } else {
2778 movsd(dst, src);
2779 }
2780 }
2781
2782
Movsd(const Operand & dst,XMMRegister src)2783 void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
2784 if (CpuFeatures::IsSupported(AVX)) {
2785 CpuFeatureScope scope(this, AVX);
2786 vmovsd(dst, src);
2787 } else {
2788 movsd(dst, src);
2789 }
2790 }
2791
2792
Movss(XMMRegister dst,XMMRegister src)2793 void MacroAssembler::Movss(XMMRegister dst, XMMRegister src) {
2794 if (CpuFeatures::IsSupported(AVX)) {
2795 CpuFeatureScope scope(this, AVX);
2796 vmovss(dst, dst, src);
2797 } else {
2798 movss(dst, src);
2799 }
2800 }
2801
2802
Movss(XMMRegister dst,const Operand & src)2803 void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
2804 if (CpuFeatures::IsSupported(AVX)) {
2805 CpuFeatureScope scope(this, AVX);
2806 vmovss(dst, src);
2807 } else {
2808 movss(dst, src);
2809 }
2810 }
2811
2812
Movss(const Operand & dst,XMMRegister src)2813 void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
2814 if (CpuFeatures::IsSupported(AVX)) {
2815 CpuFeatureScope scope(this, AVX);
2816 vmovss(dst, src);
2817 } else {
2818 movss(dst, src);
2819 }
2820 }
2821
2822
Movd(XMMRegister dst,Register src)2823 void MacroAssembler::Movd(XMMRegister dst, Register src) {
2824 if (CpuFeatures::IsSupported(AVX)) {
2825 CpuFeatureScope scope(this, AVX);
2826 vmovd(dst, src);
2827 } else {
2828 movd(dst, src);
2829 }
2830 }
2831
2832
Movd(XMMRegister dst,const Operand & src)2833 void MacroAssembler::Movd(XMMRegister dst, const Operand& src) {
2834 if (CpuFeatures::IsSupported(AVX)) {
2835 CpuFeatureScope scope(this, AVX);
2836 vmovd(dst, src);
2837 } else {
2838 movd(dst, src);
2839 }
2840 }
2841
2842
Movd(Register dst,XMMRegister src)2843 void MacroAssembler::Movd(Register dst, XMMRegister src) {
2844 if (CpuFeatures::IsSupported(AVX)) {
2845 CpuFeatureScope scope(this, AVX);
2846 vmovd(dst, src);
2847 } else {
2848 movd(dst, src);
2849 }
2850 }
2851
2852
Movq(XMMRegister dst,Register src)2853 void MacroAssembler::Movq(XMMRegister dst, Register src) {
2854 if (CpuFeatures::IsSupported(AVX)) {
2855 CpuFeatureScope scope(this, AVX);
2856 vmovq(dst, src);
2857 } else {
2858 movq(dst, src);
2859 }
2860 }
2861
2862
Movq(Register dst,XMMRegister src)2863 void MacroAssembler::Movq(Register dst, XMMRegister src) {
2864 if (CpuFeatures::IsSupported(AVX)) {
2865 CpuFeatureScope scope(this, AVX);
2866 vmovq(dst, src);
2867 } else {
2868 movq(dst, src);
2869 }
2870 }
2871
Movmskps(Register dst,XMMRegister src)2872 void MacroAssembler::Movmskps(Register dst, XMMRegister src) {
2873 if (CpuFeatures::IsSupported(AVX)) {
2874 CpuFeatureScope scope(this, AVX);
2875 vmovmskps(dst, src);
2876 } else {
2877 movmskps(dst, src);
2878 }
2879 }
2880
Movmskpd(Register dst,XMMRegister src)2881 void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
2882 if (CpuFeatures::IsSupported(AVX)) {
2883 CpuFeatureScope scope(this, AVX);
2884 vmovmskpd(dst, src);
2885 } else {
2886 movmskpd(dst, src);
2887 }
2888 }
2889
Xorps(XMMRegister dst,XMMRegister src)2890 void MacroAssembler::Xorps(XMMRegister dst, XMMRegister src) {
2891 if (CpuFeatures::IsSupported(AVX)) {
2892 CpuFeatureScope scope(this, AVX);
2893 vxorps(dst, dst, src);
2894 } else {
2895 xorps(dst, src);
2896 }
2897 }
2898
Xorps(XMMRegister dst,const Operand & src)2899 void MacroAssembler::Xorps(XMMRegister dst, const Operand& src) {
2900 if (CpuFeatures::IsSupported(AVX)) {
2901 CpuFeatureScope scope(this, AVX);
2902 vxorps(dst, dst, src);
2903 } else {
2904 xorps(dst, src);
2905 }
2906 }
2907
Roundss(XMMRegister dst,XMMRegister src,RoundingMode mode)2908 void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
2909 RoundingMode mode) {
2910 if (CpuFeatures::IsSupported(AVX)) {
2911 CpuFeatureScope scope(this, AVX);
2912 vroundss(dst, dst, src, mode);
2913 } else {
2914 roundss(dst, src, mode);
2915 }
2916 }
2917
2918
Roundsd(XMMRegister dst,XMMRegister src,RoundingMode mode)2919 void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
2920 RoundingMode mode) {
2921 if (CpuFeatures::IsSupported(AVX)) {
2922 CpuFeatureScope scope(this, AVX);
2923 vroundsd(dst, dst, src, mode);
2924 } else {
2925 roundsd(dst, src, mode);
2926 }
2927 }
2928
2929
Sqrtsd(XMMRegister dst,XMMRegister src)2930 void MacroAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
2931 if (CpuFeatures::IsSupported(AVX)) {
2932 CpuFeatureScope scope(this, AVX);
2933 vsqrtsd(dst, dst, src);
2934 } else {
2935 sqrtsd(dst, src);
2936 }
2937 }
2938
2939
Sqrtsd(XMMRegister dst,const Operand & src)2940 void MacroAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
2941 if (CpuFeatures::IsSupported(AVX)) {
2942 CpuFeatureScope scope(this, AVX);
2943 vsqrtsd(dst, dst, src);
2944 } else {
2945 sqrtsd(dst, src);
2946 }
2947 }
2948
2949
Ucomiss(XMMRegister src1,XMMRegister src2)2950 void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
2951 if (CpuFeatures::IsSupported(AVX)) {
2952 CpuFeatureScope scope(this, AVX);
2953 vucomiss(src1, src2);
2954 } else {
2955 ucomiss(src1, src2);
2956 }
2957 }
2958
2959
Ucomiss(XMMRegister src1,const Operand & src2)2960 void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
2961 if (CpuFeatures::IsSupported(AVX)) {
2962 CpuFeatureScope scope(this, AVX);
2963 vucomiss(src1, src2);
2964 } else {
2965 ucomiss(src1, src2);
2966 }
2967 }
2968
2969
Ucomisd(XMMRegister src1,XMMRegister src2)2970 void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
2971 if (CpuFeatures::IsSupported(AVX)) {
2972 CpuFeatureScope scope(this, AVX);
2973 vucomisd(src1, src2);
2974 } else {
2975 ucomisd(src1, src2);
2976 }
2977 }
2978
2979
Ucomisd(XMMRegister src1,const Operand & src2)2980 void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
2981 if (CpuFeatures::IsSupported(AVX)) {
2982 CpuFeatureScope scope(this, AVX);
2983 vucomisd(src1, src2);
2984 } else {
2985 ucomisd(src1, src2);
2986 }
2987 }
2988
2989 // ----------------------------------------------------------------------------
2990
// Clear the sign bits of the packed singles in |dst| by ANDing with a
// constant mask loaded from an external reference.
void MacroAssembler::Absps(XMMRegister dst) {
  Andps(dst,
        ExternalOperand(ExternalReference::address_of_float_abs_constant()));
}
2995
// Flip the sign bits of the packed singles in |dst| by XORing with a
// constant mask loaded from an external reference.
void MacroAssembler::Negps(XMMRegister dst) {
  Xorps(dst,
        ExternalOperand(ExternalReference::address_of_float_neg_constant()));
}
3000
// Clear the sign bits of the packed doubles in |dst|. Andps is used even for
// double data: the operation is purely bitwise, and the mask constant
// supplies the double layout.
void MacroAssembler::Abspd(XMMRegister dst) {
  Andps(dst,
        ExternalOperand(ExternalReference::address_of_double_abs_constant()));
}
3005
// Flip the sign bits of the packed doubles in |dst|. Xorps is used even for
// double data: the operation is purely bitwise, and the mask constant
// supplies the double layout.
void MacroAssembler::Negpd(XMMRegister dst) {
  Xorps(dst,
        ExternalOperand(ExternalReference::address_of_double_neg_constant()));
}
3010
// Compare |dst| against |source|: Smis are compared as immediates, heap
// objects are materialized into kScratchRegister first (clobbering it).
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    cmpp(dst, kScratchRegister);
  }
}
3020
3021
// Memory-operand variant of Cmp(Register, Handle<Object>); clobbers
// kScratchRegister for the heap-object case.
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    cmpp(dst, kScratchRegister);
  }
}
3031
3032
// Push |source| on the stack: Smis directly, heap objects via
// kScratchRegister (which is clobbered).
void MacroAssembler::Push(Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Push(Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    Push(kScratchRegister);
  }
}
3042
3043
// Load the (non-Smi) heap object |object| into |result| with an
// EMBEDDED_OBJECT relocation so the GC can update the embedded pointer.
void MacroAssembler::MoveHeapObject(Register result,
                                    Handle<Object> object) {
  DCHECK(object->IsHeapObject());
  Move(result, object, RelocInfo::EMBEDDED_OBJECT);
}
3049
3050
// Load the value stored in a global Cell into |dst|. When |dst| is rax the
// shorter load_rax encoding is used; otherwise the cell address is
// materialized into |dst| and then dereferenced.
void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
  if (dst.is(rax)) {
    AllowDeferredHandleDereference embedding_raw_address;
    load_rax(cell.location(), RelocInfo::CELL);
  } else {
    Move(dst, cell, RelocInfo::CELL);
    movp(dst, Operand(dst, 0));
  }
}
3060
3061
// Compare |value| with the object held by the weak |cell|; |scratch| is
// clobbered with the cell pointer. Sets EFLAGS for a following j(equal, ...).
void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
                                  Register scratch) {
  Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
  cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
}
3067
3068
// Load the value held by the weak |cell| into |value|. If the cell has been
// cleared the loaded value is the Smi zero (see LoadWeakValue below).
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
  Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
  movp(value, FieldOperand(value, WeakCell::kValueOffset));
}
3073
3074
// Load the weak cell's value into |value| and jump to |miss| if the cell has
// been cleared (a cleared cell reads back as a Smi).
void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
                                   Label* miss) {
  GetWeakValue(value, cell);
  JumpIfSmi(value, miss);
}
3080
3081
Drop(int stack_elements)3082 void MacroAssembler::Drop(int stack_elements) {
3083 if (stack_elements > 0) {
3084 addp(rsp, Immediate(stack_elements * kPointerSize));
3085 }
3086 }
3087
3088
// Remove |stack_elements| slots that sit below the return address, keeping
// the return address on top of the stack. |scratch| temporarily holds the
// return address in the general case.
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
                                            Register scratch) {
  DCHECK(stack_elements > 0);
  if (kPointerSize == kInt64Size && stack_elements == 1) {
    // Single-slot fast path: pop the return address directly into the slot
    // it is replacing.
    popq(MemOperand(rsp, 0));
    return;
  }

  PopReturnAddressTo(scratch);
  Drop(stack_elements);
  PushReturnAddressFrom(scratch);
}
3101
3102
// Push a pointer-sized value. On x64 this is a plain pushq; on x32 the stack
// pointer is adjusted by 4 manually because pushq would reserve 8 bytes.
void MacroAssembler::Push(Register src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    // x32 uses 64-bit push for rbp in the prologue.
    DCHECK(src.code() != rbp.code());
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), src);
  }
}
3113
3114
// Push a pointer-sized value from memory. The x32 path goes through
// kScratchRegister (clobbering it) to emulate a 4-byte push.
void MacroAssembler::Push(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), kScratchRegister);
  }
}
3124
3125
// Push a full 64-bit quantity regardless of kPointerSize. On x32 the value
// is staged in kScratchRegister (clobbering it) before the 8-byte push.
void MacroAssembler::PushQuad(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    pushq(kScratchRegister);
  }
}
3134
3135
// Push an immediate as a pointer-sized stack slot; the x32 path adjusts rsp
// by 4 and stores, instead of pushq's 8-byte slot.
void MacroAssembler::Push(Immediate value) {
  if (kPointerSize == kInt64Size) {
    pushq(value);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), value);
  }
}
3144
3145
// Push a raw 32-bit immediate. On x64 pushq_imm32 sign-extends into an
// 8-byte slot; on x32 a 4-byte slot is written directly.
void MacroAssembler::PushImm32(int32_t imm32) {
  if (kPointerSize == kInt64Size) {
    pushq_imm32(imm32);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), Immediate(imm32));
  }
}
3154
3155
// Pop a pointer-sized value into |dst|; the x32 path loads then adjusts rsp
// by 4 to mirror the 4-byte Push above.
void MacroAssembler::Pop(Register dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    // x32 uses 64-bit pop for rbp in the epilogue.
    DCHECK(dst.code() != rbp.code());
    movp(dst, Operand(rsp, 0));
    leal(rsp, Operand(rsp, 4));
  }
}
3166
3167
// Pop a pointer-sized value into a memory operand. The x32 path needs a
// temporary; if |dst|'s address uses kScratchRegister it falls back to
// kRootRegister and re-initializes it afterwards.
void MacroAssembler::Pop(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    Register scratch = dst.AddressUsesRegister(kScratchRegister)
        ? kRootRegister : kScratchRegister;
    movp(scratch, Operand(rsp, 0));
    movp(dst, scratch);
    leal(rsp, Operand(rsp, 4));
    if (scratch.is(kRootRegister)) {
      // Restore kRootRegister.
      InitializeRootRegister();
    }
  }
}
3183
3184
// Pop a full 64-bit quantity into memory regardless of kPointerSize; the x32
// path stages through kScratchRegister (clobbering it).
void MacroAssembler::PopQuad(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    popq(kScratchRegister);
    movp(dst, kScratchRegister);
  }
}
3193
3194
// Load one of the 32-bit "special" fields of a SharedFunctionInfo into
// |dst|. On x64 the field is a raw int32 that is sign-extended; on x32 the
// field is stored as a Smi and must be untagged.
void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
                                                        Register base,
                                                        int offset) {
  // The DCHECK pins |offset| to the second (high) int of a pointer-aligned
  // field pair, which is where these special fields live.
  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt64Size) {
    movsxlq(dst, FieldOperand(base, offset));
  } else {
    movp(dst, FieldOperand(base, offset));
    SmiToInteger32(dst, dst);
  }
}
3208
3209
// Test a single bit of a SharedFunctionInfo special field with a one-byte
// testb, computing the byte and bit position of bit |bits| within the field.
void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
                                                           int offset,
                                                           int bits) {
  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt32Size) {
    // On x32, this field is represented by SMI.
    bits += kSmiShift;
  }
  int byte_offset = bits / kBitsPerByte;
  int bit_in_byte = bits & (kBitsPerByte - 1);
  testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
3224
3225
// Indirect jump to an external (C++) address; clobbers kScratchRegister.
void MacroAssembler::Jump(ExternalReference ext) {
  LoadAddress(kScratchRegister, ext);
  jmp(kScratchRegister);
}
3230
3231
// Jump to a target loaded from memory. On x32 the 4-byte slot is first
// loaded into kScratchRegister (clobbering it) since jmp needs 64 bits.
void MacroAssembler::Jump(const Operand& op) {
  if (kPointerSize == kInt64Size) {
    jmp(op);
  } else {
    movp(kScratchRegister, op);
    jmp(kScratchRegister);
  }
}
3240
3241
// Jump to an absolute address with the given relocation mode; clobbers
// kScratchRegister.
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
  Move(kScratchRegister, destination, rmode);
  jmp(kScratchRegister);
}
3246
3247
// Jump to a Code object via a relocated jmp.
void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
  // TODO(X64): Inline this
  jmp(code_object, rmode);
}
3252
3253
// Byte size of the code Call(ExternalReference) emits: the address load plus
// the indirect call through kScratchRegister.
int MacroAssembler::CallSize(ExternalReference ext) {
  // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
  return LoadAddressSize(ext) +
         Assembler::kCallScratchRegisterInstructionLength;
}
3259
3260
// Call an external (C++) address through kScratchRegister (clobbering it).
// Debug builds verify the emitted size matches CallSize(ext).
void MacroAssembler::Call(ExternalReference ext) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(ext);
#endif
  LoadAddress(kScratchRegister, ext);
  call(kScratchRegister);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}
3271
3272
// Call a target loaded from memory. The register-indirect form (clobbering
// kScratchRegister) is used on x32 and also on Atom, where the
// memory-indirect call is avoided.
void MacroAssembler::Call(const Operand& op) {
  if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
    call(op);
  } else {
    movp(kScratchRegister, op);
    call(kScratchRegister);
  }
}
3281
3282
Call(Address destination,RelocInfo::Mode rmode)3283 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
3284 #ifdef DEBUG
3285 int end_position = pc_offset() + CallSize(destination);
3286 #endif
3287 Move(kScratchRegister, destination, rmode);
3288 call(kScratchRegister);
3289 #ifdef DEBUG
3290 CHECK_EQ(pc_offset(), end_position);
3291 #endif
3292 }
3293
3294
// Call a Code object via a relocated call, optionally tagged with a type
// feedback id. Debug builds verify the emitted size.
void MacroAssembler::Call(Handle<Code> code_object,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(code_object);
#endif
  DCHECK(RelocInfo::IsCodeTarget(rmode) ||
      rmode == RelocInfo::CODE_AGE_SEQUENCE);
  call(code_object, rmode, ast_id);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}
3308
3309
// Extract 32-bit lane |imm8| (0 or 1) of |src| into |dst|. Lane 0 is a plain
// Movd; without SSE4.1, lane 1 is obtained by moving the whole quadword and
// shifting.
void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
  if (imm8 == 0) {
    Movd(dst, src);
    return;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pextrd(dst, src, imm8);
    return;
  }
  // Fallback supports only lane 1.
  DCHECK_EQ(1, imm8);
  movq(dst, src);
  shrq(dst, Immediate(32));
}
3324
3325
Pinsrd(XMMRegister dst,Register src,int8_t imm8)3326 void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
3327 if (CpuFeatures::IsSupported(SSE4_1)) {
3328 CpuFeatureScope sse_scope(this, SSE4_1);
3329 pinsrd(dst, src, imm8);
3330 return;
3331 }
3332 Movd(kScratchDoubleReg, src);
3333 if (imm8 == 1) {
3334 punpckldq(dst, kScratchDoubleReg);
3335 } else {
3336 DCHECK_EQ(0, imm8);
3337 Movss(dst, kScratchDoubleReg);
3338 }
3339 }
3340
3341
// Memory-operand variant of Pinsrd: insert a 32-bit value into lane 0 or 1
// of |dst|. Clobbers kScratchDoubleReg on the non-SSE4.1 fallback path.
void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
  DCHECK(imm8 == 0 || imm8 == 1);
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  Movd(kScratchDoubleReg, src);
  if (imm8 == 1) {
    punpckldq(dst, kScratchDoubleReg);
  } else {
    DCHECK_EQ(0, imm8);
    Movss(dst, kScratchDoubleReg);
  }
}
3357
3358
// 32-bit count-leading-zeros. Uses the LZCNT instruction when available;
// otherwise emulates it with bsrl, defining lzcnt(0) == 32 explicitly
// because bsr leaves the destination undefined for a zero source.
void MacroAssembler::Lzcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}
3372
3373
// Memory-operand variant of the 32-bit count-leading-zeros above; same
// bsrl/xor emulation when LZCNT is unavailable.
void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}
3387
3388
// 64-bit count-leading-zeros; emulated with bsrq when LZCNT is unavailable,
// defining lzcnt(0) == 64 explicitly.
void MacroAssembler::Lzcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}
3402
3403
// Memory-operand variant of the 64-bit count-leading-zeros above.
void MacroAssembler::Lzcntq(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}
3417
3418
// 64-bit count-trailing-zeros. Uses BMI1's TZCNT when available; otherwise
// bsfq with an explicit result of 64 for a zero source.
void MacroAssembler::Tzcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}
3432
3433
// Memory-operand variant of the 64-bit count-trailing-zeros above.
void MacroAssembler::Tzcntq(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}
3447
3448
// 32-bit count-trailing-zeros; bsfl emulation when BMI1 is unavailable, with
// an explicit result of 32 for a zero source.
void MacroAssembler::Tzcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}
3461
3462
// Memory-operand variant of the 32-bit count-trailing-zeros above.
void MacroAssembler::Tzcntl(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}
3475
3476
// 32-bit population count. There is no software fallback: callers must only
// reach this when POPCNT is known to be supported.
void MacroAssembler::Popcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntl(dst, src);
    return;
  }
  UNREACHABLE();
}
3485
3486
// Memory-operand variant of the 32-bit population count; no fallback.
void MacroAssembler::Popcntl(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntl(dst, src);
    return;
  }
  UNREACHABLE();
}
3495
3496
// 64-bit population count; no software fallback (POPCNT required).
void MacroAssembler::Popcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntq(dst, src);
    return;
  }
  UNREACHABLE();
}
3505
3506
// Memory-operand variant of the 64-bit population count; no fallback.
void MacroAssembler::Popcntq(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntq(dst, src);
    return;
  }
  UNREACHABLE();
}
3515
3516
// Push the 12 safepoint-saved general registers (all except rsp, rbp,
// kScratchRegister r10 and kRootRegister r13), then reserve padding so the
// frame covers kNumSafepointRegisters slots.
void MacroAssembler::Pushad() {
  Push(rax);
  Push(rcx);
  Push(rdx);
  Push(rbx);
  // Not pushing rsp or rbp.
  Push(rsi);
  Push(rdi);
  Push(r8);
  Push(r9);
  // r10 is kScratchRegister.
  Push(r11);
  Push(r12);
  // r13 is kRootRegister.
  Push(r14);
  Push(r15);
  STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
  // Use lea for symmetry with Popad.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  leap(rsp, Operand(rsp, -sp_delta));
}
3539
3540
// Inverse of Pushad: drop the padding, then pop the saved registers in
// reverse push order.
void MacroAssembler::Popad() {
  // Popad must not change the flags, so use lea instead of addq.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  leap(rsp, Operand(rsp, sp_delta));
  Pop(r15);
  Pop(r14);
  Pop(r12);
  Pop(r11);
  Pop(r9);
  Pop(r8);
  Pop(rdi);
  Pop(rsi);
  Pop(rbx);
  Pop(rdx);
  Pop(rcx);
  Pop(rax);
}
3559
3560
// Discard an entire Pushad frame without restoring any registers.
void MacroAssembler::Dropad() {
  addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
3564
3565
// Order general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
// Entries are indexed by register code; -1 marks registers Pushad skips
// (rsp, rbp, r10 = kScratchRegister, r13 = kRootRegister).
const int
MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
    0,
    1,
    2,
    3,
    -1,
    -1,
    4,
    5,
    6,
    7,
    -1,
    8,
    9,
    -1,
    10,
    11
};
3587
3588
// Store an immediate into the Pushad stack slot corresponding to |dst|.
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
                                                  const Immediate& imm) {
  movp(SafepointRegisterSlot(dst), imm);
}
3593
3594
// Store |src| into the Pushad stack slot corresponding to |dst|.
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  movp(SafepointRegisterSlot(dst), src);
}
3598
3599
// Load |dst| from the Pushad stack slot corresponding to |src|.
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  movp(dst, SafepointRegisterSlot(src));
}
3603
3604
// rsp-relative operand for |reg|'s slot in a Pushad frame, using the
// kSafepointPushRegisterIndices mapping above.
Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}
3608
3609
// Push a new stack handler and link it into the isolate's handler chain.
void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  // Link the current handler as the next handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  Push(ExternalOperand(handler_address));

  // Set this new handler as the current one.
  movp(ExternalOperand(handler_address), rsp);
}
3622
3623
// Unlink the top stack handler: restore the previous handler pointer and
// drop the rest of the handler frame.
void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  Pop(ExternalOperand(handler_address));
  addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
3630
3631
// Plain return, popping nothing but the return address.
void MacroAssembler::Ret() {
  ret(0);
}
3635
3636
// Return and drop |bytes_dropped| bytes of arguments. ret's immediate is
// only 16 bits wide, so larger counts adjust rsp manually around the return
// address, using |scratch| to hold it.
void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
    PopReturnAddressTo(scratch);
    addp(rsp, Immediate(bytes_dropped));
    PushReturnAddressFrom(scratch);
    ret(0);
  }
}
3647
3648
// Compare the two top x87 stack values into EFLAGS and pop both
// (fucomip pops one, fstp(0) the other).
void MacroAssembler::FCmp() {
  fucomip();
  fstp(0);
}
3653
3654
// Load |heap_object|'s map into |map| and compare its instance type against
// |type|; sets EFLAGS for a following conditional jump.
void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}
3661
3662
// Compare the one-byte instance-type field of |map| against |type|.
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       Immediate(static_cast<int8_t>(type)));
}
3667
// Compare |obj|'s map pointer against the handle |map|; sets EFLAGS.
void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
3671
3672
// Jump to |fail| unless |obj| has exactly the given |map|. With
// DO_SMI_CHECK, Smis also branch to |fail| before the map load.
void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  CompareMap(obj, map);
  j(not_equal, fail);
}
3684
3685
// Clamp the signed 32-bit value in |reg| to [0, 255]. Values already in
// range are untouched; otherwise the sign picks 0 (negative) or 255
// (positive overflow) via the setcc/decb trick.
void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  testl(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  decb(reg);  // 0 if negative, 255 if positive.
  bind(&done);
}
3694
3695
// Clamp the double in |input_reg| to [0, 255] as an integer in |result_reg|.
// |temp_xmm_reg| is clobbered (zeroed for the NaN/out-of-range comparison).
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister temp_xmm_reg,
                                        Register result_reg) {
  Label done;
  Label conv_failure;
  Xorpd(temp_xmm_reg, temp_xmm_reg);
  Cvtsd2si(result_reg, input_reg);
  testl(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);  // Already in [0, 255].
  // cmp with 1 overflows only for 0x80000000, the cvtsd2si failure pattern.
  cmpl(result_reg, Immediate(1));
  j(overflow, &conv_failure, Label::kNear);
  // In-range conversion but outside [0, 255]: clamp by sign.
  movl(result_reg, Immediate(0));
  setcc(sign, result_reg);
  subl(result_reg, Immediate(1));
  andl(result_reg, Immediate(255));
  jmp(&done, Label::kNear);
  bind(&conv_failure);
  // Conversion failed (NaN or far out of range): 0 for NaN/negative,
  // 255 otherwise.
  Set(result_reg, 0);
  Ucomisd(input_reg, temp_xmm_reg);
  j(below, &done, Label::kNear);
  Set(result_reg, 255);
  bind(&done);
}
3719
3720
// Convert the uint32 in |src| (upper 32 bits must already be zero, verified
// under --debug-code) to a double in |dst| via a 64-bit signed conversion.
void MacroAssembler::LoadUint32(XMMRegister dst,
                                Register src) {
  if (FLAG_debug_code) {
    cmpq(src, Immediate(0xffffffff));
    Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
  }
  Cvtqsi2sd(dst, src);
}
3729
3730
// Out-of-line double-to-int32 truncation: call the DoubleToIStub, which
// reads the double at [input_reg + offset] and writes |result_reg|.
void MacroAssembler::SlowTruncateToI(Register result_reg,
                                     Register input_reg,
                                     int offset) {
  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
  call(stub.GetCode(), RelocInfo::CODE_TARGET);
}
3737
3738
// Truncate the HeapNumber in |input_reg| to an int32 in |result_reg|.
// Clobbers kScratchDoubleReg; falls back to DoubleToIStub when the fast
// cvttsd2siq conversion fails.
void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                           Register input_reg) {
  Label done;
  Movsd(kScratchDoubleReg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  Cvttsd2siq(result_reg, kScratchDoubleReg);
  // cvttsd2siq yields 0x8000000000000000 on failure; cmp with 1 sets the
  // overflow flag exactly for that value.
  cmpq(result_reg, Immediate(1));
  j(no_overflow, &done, Label::kNear);

  // Slow case.
  if (input_reg.is(result_reg)) {
    // input_reg is being clobbered; spill the double to the stack for the
    // stub instead.
    subp(rsp, Immediate(kDoubleSize));
    Movsd(MemOperand(rsp, 0), kScratchDoubleReg);
    SlowTruncateToI(result_reg, rsp, 0);
    addp(rsp, Immediate(kDoubleSize));
  } else {
    SlowTruncateToI(result_reg, input_reg);
  }

  bind(&done);
  // Keep our invariant that the upper 32 bits are zero.
  movl(result_reg, result_reg);
}
3761
3762
// Truncate the double in |input_reg| to an int32 in |result_reg|, spilling
// to the stack and calling DoubleToIStub when the fast conversion fails.
void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                       XMMRegister input_reg) {
  Label done;
  Cvttsd2siq(result_reg, input_reg);
  // cvttsd2siq yields 0x8000000000000000 on failure; cmp with 1 sets the
  // overflow flag exactly for that value.
  cmpq(result_reg, Immediate(1));
  j(no_overflow, &done, Label::kNear);

  subp(rsp, Immediate(kDoubleSize));
  Movsd(MemOperand(rsp, 0), input_reg);
  SlowTruncateToI(result_reg, rsp, 0);
  addp(rsp, Immediate(kDoubleSize));

  bind(&done);
  // Keep our invariant that the upper 32 bits are zero.
  movl(result_reg, result_reg);
}
3779
3780
// Convert the double in |input_reg| to an int32 in |result_reg|, branching
// to |lost_precision| if the value is not exactly representable, to |is_nan|
// for NaN, and (with FAIL_ON_MINUS_ZERO) to |minus_zero| for -0.0.
// Clobbers kScratchDoubleReg; |scratch| is unused here.
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                               XMMRegister scratch,
                               MinusZeroMode minus_zero_mode,
                               Label* lost_precision, Label* is_nan,
                               Label* minus_zero, Label::Distance dst) {
  Cvttsd2si(result_reg, input_reg);
  // Round-trip the result back to double and compare with the input to
  // detect precision loss.
  Cvtlsi2sd(kScratchDoubleReg, result_reg);
  Ucomisd(kScratchDoubleReg, input_reg);
  j(not_equal, lost_precision, dst);
  j(parity_even, is_nan, dst);  // NaN.
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    Label done;
    // The integer converted back is equal to the original. We
    // only have to test if we got -0 as an input.
    testl(result_reg, result_reg);
    j(not_zero, &done, Label::kNear);
    Movmskpd(result_reg, input_reg);
    // Bit 0 contains the sign of the double in input_reg.
    // If input was positive, we are ok and return 0, otherwise
    // jump to minus_zero.
    andl(result_reg, Immediate(1));
    j(not_zero, minus_zero, dst);
    bind(&done);
  }
}
3806
3807
// Load |map|'s descriptor array into |descriptors|.
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}
3812
3813
// Extract the number-of-own-descriptors bit field of |map| into |dst|.
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  movl(dst, FieldOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
3818
3819
// Extract |map|'s enum-length bit field into |dst| as a Smi. Relies on the
// field starting at bit 0 so a mask suffices (no shift needed).
void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  movl(dst, FieldOperand(map, Map::kBitField3Offset));
  andl(dst, Immediate(Map::EnumLengthBits::kMask));
  Integer32ToSmi(dst, dst);
}
3826
3827
// Load the getter or setter of the accessor pair at |accessor_index| in
// |holder|'s descriptor array into |dst| (also used as scratch throughout).
void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  movp(dst, FieldOperand(dst, offset));
}
3838
3839
// Tail-jump to |success| if |obj|'s map equals the map held by the weak
// |cell|; fall through otherwise (or if |obj| is a Smi under DO_SMI_CHECK).
// Clobbers |scratch1| and |scratch2|.
void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
                                     Register scratch2, Handle<WeakCell> cell,
                                     Handle<Code> success,
                                     SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
  CmpWeakValue(scratch1, cell, scratch2);
  j(equal, success, RelocInfo::CODE_TARGET);
  bind(&fail);
}
3853
3854
// Debug-mode check (no-op unless --debug-code) that |object| is a Smi or a
// HeapNumber; aborts with kOperandIsNotANumber otherwise.
void MacroAssembler::AssertNumber(Register object) {
  if (emit_debug_code()) {
    Label ok;
    Condition is_smi = CheckSmi(object);
    j(is_smi, &ok, Label::kNear);
    Cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(equal, kOperandIsNotANumber);
    bind(&ok);
  }
}
3866
// Debug-mode check that |object| is neither a Smi nor a HeapNumber.
void MacroAssembler::AssertNotNumber(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(NegateCondition(is_smi), kOperandIsANumber);
    Cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(not_equal, kOperandIsANumber);
  }
}
3876
// Debug-mode check that |object| is not a Smi.
void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(NegateCondition(is_smi), kOperandIsASmi);
  }
}
3883
3884
// Debug-mode check that |object| is a Smi.
void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(is_smi, kOperandIsNotASmi);
  }
}
3891
3892
// Debug-mode check that the memory operand |object| holds a Smi.
void MacroAssembler::AssertSmi(const Operand& object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(is_smi, kOperandIsNotASmi);
  }
}
3899
3900
// Debug-mode check that the upper 32 bits of |int32_register| are zero, by
// comparing against 2^32 held in kScratchRegister (which is clobbered).
void MacroAssembler::AssertZeroExtended(Register int32_register) {
  if (emit_debug_code()) {
    DCHECK(!int32_register.is(kScratchRegister));
    movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
    cmpq(kScratchRegister, int32_register);
    Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
  }
}
3909
3910
// Debug-mode check that |object| is a String: not a Smi, and with an
// instance type below FIRST_NONSTRING_TYPE. |object| is preserved by
// push/pop around the map load.
void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAString);
    Push(object);
    movp(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
    Pop(object);
    Check(below, kOperandIsNotAString);
  }
}
3922
3923
// Debug-mode check that |object| is a Name (instance type at or below
// LAST_NAME_TYPE). |object| is preserved by push/pop around the map load.
void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAName);
    Push(object);
    movp(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, LAST_NAME_TYPE);
    Pop(object);
    Check(below_equal, kOperandIsNotAName);
  }
}
3935
3936
// Debug-mode check that |object| is a JSFunction; preserved via push/pop.
void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAFunction);
    Push(object);
    CmpObjectType(object, JS_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, kOperandIsNotAFunction);
  }
}
3947
3948
// Debug-mode check that |object| is a JSBoundFunction; preserved via
// push/pop.
void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotABoundFunction);
    Push(object);
    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, kOperandIsNotABoundFunction);
  }
}
3959
// Debug-mode check that |object| is a JSGeneratorObject. |object| is
// preserved via the stack.
void MacroAssembler::AssertGeneratorObject(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
    Push(object);
    CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
    Pop(object);
    Check(equal, kOperandIsNotAGeneratorObject);
  }
}
3970
// Debug-mode check that |object| is a JSReceiver (any object usable as a
// receiver). Receiver types occupy the top of the instance-type range, so a
// single lower-bound compare suffices. |object| is preserved via the stack.
void MacroAssembler::AssertReceiver(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAReceiver);
    Push(object);
    // The upper-bound check is free because receivers end the type range.
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
    Pop(object);
    Check(above_equal, kOperandIsNotAReceiver);
  }
}
3982
3983
AssertUndefinedOrAllocationSite(Register object)3984 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3985 if (emit_debug_code()) {
3986 Label done_checking;
3987 AssertNotSmi(object);
3988 Cmp(object, isolate()->factory()->undefined_value());
3989 j(equal, &done_checking);
3990 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3991 Assert(equal, kExpectedUndefinedOrCell);
3992 bind(&done_checking);
3993 }
3994 }
3995
3996
AssertRootValue(Register src,Heap::RootListIndex root_value_index,BailoutReason reason)3997 void MacroAssembler::AssertRootValue(Register src,
3998 Heap::RootListIndex root_value_index,
3999 BailoutReason reason) {
4000 if (emit_debug_code()) {
4001 DCHECK(!src.is(kScratchRegister));
4002 LoadRoot(kScratchRegister, root_value_index);
4003 cmpp(src, kScratchRegister);
4004 Check(equal, reason);
4005 }
4006 }
4007
4008
4009
// Loads |heap_object|'s map into |map| and its instance type into
// |instance_type|, then tests the not-string mask. Returns the condition
// that holds when the object is a String; callers branch on the result.
Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  testb(instance_type, Immediate(kIsNotStringMask));
  // ZF set <=> not-string bits clear <=> the object is a string.
  return zero;
}
4019
4020
// Loads |heap_object|'s map into |map| and its instance type into
// |instance_type|, then compares against LAST_NAME_TYPE. Returns the
// condition that holds when the object is a Name (String or Symbol).
Condition MacroAssembler::IsObjectNameType(Register heap_object,
                                           Register map,
                                           Register instance_type) {
  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
  return below_equal;
}
4029
4030
// Walks |map|'s constructor-or-back-pointer chain and leaves the terminal
// value in |result|. Intermediate back pointers are maps; the walk stops at
// the first value that is a Smi or a non-map heap object. |temp| is
// clobbered by the type check.
void MacroAssembler::GetMapConstructor(Register result, Register map,
                                       Register temp) {
  Label done, loop;
  movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
  bind(&loop);
  JumpIfSmi(result, &done, Label::kNear);
  CmpObjectType(result, MAP_TYPE, temp);
  j(not_equal, &done, Label::kNear);
  // Still a map: follow the back pointer one more step.
  movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
  jmp(&loop);
  bind(&done);
}
4043
SetCounter(StatsCounter * counter,int value)4044 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
4045 if (FLAG_native_code_counters && counter->Enabled()) {
4046 Operand counter_operand = ExternalOperand(ExternalReference(counter));
4047 movl(counter_operand, Immediate(value));
4048 }
4049 }
4050
4051
IncrementCounter(StatsCounter * counter,int value)4052 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
4053 DCHECK(value > 0);
4054 if (FLAG_native_code_counters && counter->Enabled()) {
4055 Operand counter_operand = ExternalOperand(ExternalReference(counter));
4056 if (value == 1) {
4057 incl(counter_operand);
4058 } else {
4059 addl(counter_operand, Immediate(value));
4060 }
4061 }
4062 }
4063
4064
DecrementCounter(StatsCounter * counter,int value)4065 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
4066 DCHECK(value > 0);
4067 if (FLAG_native_code_counters && counter->Enabled()) {
4068 Operand counter_operand = ExternalOperand(ExternalReference(counter));
4069 if (value == 1) {
4070 decl(counter_operand);
4071 } else {
4072 subl(counter_operand, Immediate(value));
4073 }
4074 }
4075 }
4076
// If the debugger has flagged a frame for restart (non-null restart frame
// pointer in the isolate), tail-jump into the frame-dropper trampoline.
// Clobbers rbx.
void MacroAssembler::MaybeDropFrames() {
  // Check whether we need to drop frames to restart a function on the stack.
  ExternalReference restart_fp =
      ExternalReference::debug_restart_fp_address(isolate());
  Load(rbx, restart_fp);
  testp(rbx, rbx);
  j(not_zero, isolate()->builtins()->FrameDropperTrampoline(),
    RelocInfo::CODE_TARGET);
}
4086
// Removes the current frame in preparation for a tail call: computes where
// the stack pointer must end up so that the callee's arguments overwrite
// the caller's, copies the callee arguments (plus receiver and return
// address) into place going backwards, restores the caller's rbp, and
// finally moves rsp. |caller_args_count_reg|, |scratch0| and |scratch1|
// are clobbered. |ra_state| says whether the return address is already on
// the stack or must be pushed here.
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1,
                                        ReturnAddressState ra_state) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the destination address where we will put the return address
  // after we drop current frame.
  Register new_sp_reg = scratch0;
  if (callee_args_count.is_reg()) {
    subp(caller_args_count_reg, callee_args_count.reg());
    leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
                             StandardFrameConstants::kCallerPCOffset));
  } else {
    leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
                             StandardFrameConstants::kCallerPCOffset -
                                 callee_args_count.immediate() * kPointerSize));
  }

  if (FLAG_debug_code) {
    cmpp(rsp, new_sp_reg);
    Check(below, kStackAccessBelowStackPointer);
  }

  // Copy return address from caller's frame to current frame's return address
  // to avoid its trashing and let the following loop copy it to the right
  // place.
  Register tmp_reg = scratch1;
  if (ra_state == ReturnAddressState::kOnStack) {
    movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
    movp(Operand(rsp, 0), tmp_reg);
  } else {
    DCHECK(ReturnAddressState::kNotOnStack == ra_state);
    Push(Operand(rbp, StandardFrameConstants::kCallerPCOffset));
  }

  // Restore caller's frame pointer now as it could be overwritten by
  // the copying loop.
  movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // +2 here is to copy both receiver and return address.
  Register count_reg = caller_args_count_reg;
  if (callee_args_count.is_reg()) {
    leap(count_reg, Operand(callee_args_count.reg(), 2));
  } else {
    movp(count_reg, Immediate(callee_args_count.immediate() + 2));
    // TODO(ishell): Unroll copying loop for small immediate values.
  }

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).
  Label loop, entry;
  jmp(&entry, Label::kNear);
  bind(&loop);
  decp(count_reg);
  movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0));
  movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
  bind(&entry);
  cmpp(count_reg, Immediate(0));
  j(not_equal, &loop, Label::kNear);

  // Leave current frame.
  movp(rsp, new_sp_reg);
}
4158
// Invokes |function| when the expected argument count is not statically
// known: loads the formal parameter count from the function's
// SharedFunctionInfo into rbx and forwards to the full InvokeFunction
// overload. Clobbers rbx.
void MacroAssembler::InvokeFunction(Register function,
                                    Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  LoadSharedFunctionInfoSpecialField(
      rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);

  ParameterCount expected(rbx);
  InvokeFunction(function, new_target, expected, actual, flag, call_wrapper);
}
4171
4172
// Invokes a statically-known |function|: materializes it into rdi (the
// register the calling convention expects) with no new.target, then
// forwards to the register overload.
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  Move(rdi, function);
  InvokeFunction(rdi, no_reg, expected, actual, flag, call_wrapper);
}
4181
4182
// Invokes |function| (which must already be in rdi) with both expected and
// actual argument counts supplied. Loads the function's context into rsi
// before dispatching to InvokeFunctionCode.
void MacroAssembler::InvokeFunction(Register function,
                                    Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  DCHECK(function.is(rdi));
  movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
  InvokeFunctionCode(rdi, new_target, expected, actual, flag, call_wrapper);
}
4193
4194
// Core invoke path: optionally runs the debug hook, defaults new.target to
// undefined, emits the argument-count adaptation prologue, and then calls
// or jumps through the function's code entry. |function| must be rdi and
// |new_target|, if valid, must be rdx (the calling convention registers).
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag,
                                        const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function.is(rdi));
  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));

  if (call_wrapper.NeedsDebugHookCheck()) {
    CheckDebugHook(function, new_target, expected, actual);
  }

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected,
                 actual,
                 &done,
                 &definitely_mismatches,
                 flag,
                 Label::kNear,
                 call_wrapper);
  // On a definite mismatch the prologue already jumped to the arguments
  // adaptor, so the direct call below would be dead code.
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      jmp(code);
    }
    bind(&done);
  }
}
4239
4240
// Emits the expected-vs-actual argument-count check that precedes a
// function invocation. When the counts may differ at runtime the code
// branches to the ArgumentsAdaptorTrampoline; when they provably match
// (or the callee opted out of adaptation) no adaptor call is emitted.
// Sets *definitely_mismatches when the mismatch is known statically, in
// which case control never reaches |done|. Register contract: actual count
// ends up in rax, expected count in rbx (when needed by the adaptor).
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    Label::Distance near_jump,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label invoke;
  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    Set(rax, actual.immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      if (expected.immediate() ==
              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for built-ins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        Set(rbx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      Set(rax, actual.immediate());
      cmpp(expected.reg(), Immediate(actual.immediate()));
      j(equal, &invoke, Label::kNear);
      DCHECK(expected.reg().is(rbx));
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmpp(expected.reg(), actual.reg());
      j(equal, &invoke, Label::kNear);
      DCHECK(actual.reg().is(rax));
      DCHECK(expected.reg().is(rbx));
    } else {
      // Same register holds both counts, so they trivially match.
      definitely_matches = true;
      Move(rax, actual.reg());
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        jmp(done, near_jump);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}
4306
// If the isolate's debug-hook-on-function-call flag is set, calls
// Runtime::kDebugOnFunctionCall. All live invocation registers (expected
// and actual counts, new.target, function) are saved around the runtime
// call; register counts are converted to Smis for the duration. The
// push/pop order must mirror exactly, so it cannot be reordered.
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
  Label skip_hook;
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  Operand debug_hook_active_operand = ExternalOperand(debug_hook_active);
  cmpb(debug_hook_active_operand, Immediate(0));
  j(equal, &skip_hook);
  {
    // Enter an internal frame only if we are not already inside one.
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      Integer32ToSmi(expected.reg(), expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      Integer32ToSmi(actual.reg(), actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    // |fun| is pushed twice: once as the runtime argument, once to be
    // restored by the Pop below.
    Push(fun);
    Push(fun);
    CallRuntime(Runtime::kDebugOnFunctionCall);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiToInteger64(actual.reg(), actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiToInteger64(expected.reg(), expected.reg());
    }
  }
  bind(&skip_hook);
}
4348
// Emits the minimal stub frame prologue: saved rbp, new frame pointer, and
// the frame-type marker slot.
void MacroAssembler::StubPrologue(StackFrame::Type type) {
  pushq(rbp);  // Caller's frame pointer.
  movp(rbp, rsp);
  Push(Immediate(StackFrame::TypeToMarker(type)));
}
4354
Prologue(bool code_pre_aging)4355 void MacroAssembler::Prologue(bool code_pre_aging) {
4356 PredictableCodeSizeScope predictible_code_size_scope(this,
4357 kNoCodeAgeSequenceLength);
4358 if (code_pre_aging) {
4359 // Pre-age the code.
4360 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
4361 RelocInfo::CODE_AGE_SEQUENCE);
4362 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
4363 } else {
4364 pushq(rbp); // Caller's frame pointer.
4365 movp(rbp, rsp);
4366 Push(rsi); // Callee's context.
4367 Push(rdi); // Callee's JS function.
4368 }
4369 }
4370
// Loads the current function's feedback vector into |vector|:
// frame function slot -> JSFunction -> feedback-vector cell -> cell value.
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
  movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  movp(vector, FieldOperand(vector, JSFunction::kFeedbackVectorOffset));
  movp(vector, FieldOperand(vector, Cell::kValueOffset));
}
4376
4377
// Overload taking a constant-pool flag; x64 has no out-of-line constant
// pool, so this must never be called.
void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // Out-of-line constant pool not implemented on x64.
  UNREACHABLE();
}
4383
4384
// Builds a typed stack frame: saved rbp, type marker, and — for INTERNAL
// frames — the code object slot. In debug code, verifies that the pushed
// code-object slot was patched to a real value (not left as undefined).
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  pushq(rbp);
  movp(rbp, rsp);
  Push(Immediate(StackFrame::TypeToMarker(type)));
  if (type == StackFrame::INTERNAL) {
    Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
    Push(kScratchRegister);
  }
  if (emit_debug_code()) {
    Move(kScratchRegister,
         isolate()->factory()->undefined_value(),
         RelocInfo::EMBEDDED_OBJECT);
    cmpp(Operand(rsp, 0), kScratchRegister);
    Check(not_equal, kCodeObjectNotProperlyPatched);
  }
}
4401
4402
// Tears down a frame built by EnterFrame. In debug code, first verifies
// the frame's type marker matches |type|.
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
         Immediate(StackFrame::TypeToMarker(type)));
    Check(equal, kStackFrameTypesMustMatch);
  }
  movp(rsp, rbp);
  popq(rbp);
}
4412
// Builds a builtin frame: saved rbp, then context, target and argument
// count. LeaveBuiltinFrame must pop in the mirrored order.
void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Push(rbp);
  Move(rbp, rsp);
  Push(context);
  Push(target);
  Push(argc);
}
4421
// Tears down a frame built by EnterBuiltinFrame, restoring the three saved
// registers in reverse push order before leaving the frame.
void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Pop(argc);
  Pop(target);
  Pop(context);
  leave();
}
4429
// First half of exit-frame construction: lays out the fixed frame slots
// (type marker, saved-SP placeholder, code object) and publishes rbp/rsi/
// rbx to the isolate's top-of-stack external references. When |save_rax|
// is set, rax is preserved in callee-saved r14 for later use (see
// EnterExitFrame).
void MacroAssembler::EnterExitFramePrologue(bool save_rax,
                                            StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);

  // Set up the frame structure on the stack.
  // All constants are relative to the frame pointer of the exit frame.
  DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
            ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  pushq(rbp);
  movp(rbp, rsp);

  // Reserve room for entry stack pointer and push the code object.
  Push(Immediate(StackFrame::TypeToMarker(frame_type)));
  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
  Push(Immediate(0));  // Saved entry sp, patched before call.
  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  if (save_rax) {
    movp(r14, rax);  // Backup rax in callee-save register.
  }

  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
  Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
}
4460
4461
// Second half of exit-frame construction: reserves C-call argument space
// (plus the Win64 shadow space), optionally spills the allocatable XMM
// registers below the fixed frame, aligns rsp to the OS frame alignment,
// and patches the frame's saved-SP slot with the final rsp.
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
                                            bool save_doubles) {
#ifdef _WIN64
  // Win64 ABI requires 4 register-parameter "shadow" slots for the callee.
  const int kShadowSpace = 4;
  arg_stack_space += kShadowSpace;
#endif
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
                arg_stack_space * kRegisterSize;
    subp(rsp, Immediate(space));
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
      Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else if (arg_stack_space > 0) {
    subp(rsp, Immediate(arg_stack_space * kRegisterSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
    DCHECK(is_int8(kFrameAlignment));
    andp(rsp, Immediate(-kFrameAlignment));
  }

  // Patch the saved entry sp.
  movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
4495
// Builds a full exit frame for calling out to C. r14 carries the value the
// prologue saved from rax (presumably the argument count — the caller
// contract isn't visible here); r15 is set to point at the arguments
// (argv) and both survive the C call as callee-saved registers.
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
                                    StackFrame::Type frame_type) {
  EnterExitFramePrologue(true, frame_type);

  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
  // so it must be retained across the C-call.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  leap(r15, Operand(rbp, r14, times_pointer_size, offset));

  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
4507
4508
// Builds an exit frame for API calls: no rax backup, no double-register
// spill, plain EXIT frame type.
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
  EnterExitFramePrologue(false, StackFrame::EXIT);
  EnterExitFrameEpilogue(arg_stack_space, false);
}
4513
4514
// Tears down a frame built by EnterExitFrame. Restores spilled XMM
// registers when |save_doubles| matches the entry. When |pop_arguments| is
// set, also drops the pushed arguments and receiver using the argv pointer
// kept in r15 and re-pushes the return address; otherwise just unwinds the
// frame. Always restores the context from the isolate.
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
  // Registers:
  // r15 : argv
  if (save_doubles) {
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
      Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  if (pop_arguments) {
    // Get the return address from the stack and restore the frame pointer.
    movp(rcx, Operand(rbp, kFPOnStackSize));
    movp(rbp, Operand(rbp, 0 * kPointerSize));

    // Drop everything up to and including the arguments and the receiver
    // from the caller stack.
    leap(rsp, Operand(r15, 1 * kPointerSize));

    PushReturnAddressFrom(rcx);
  } else {
    // Otherwise just leave the exit frame.
    leave();
  }

  LeaveExitFrameEpilogue(true);
}
4545
4546
// Tears down a frame built by EnterApiExitFrame. |restore_context| selects
// whether rsi is reloaded from the isolate's saved context.
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
  movp(rsp, rbp);
  popq(rbp);

  LeaveExitFrameEpilogue(restore_context);
}
4553
4554
// Shared exit-frame teardown tail: optionally restores rsi from the
// isolate's saved context (poisoning the slot in debug builds), then
// clears the isolate's C entry frame pointer.
void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  Operand context_operand = ExternalOperand(context_address);
  if (restore_context) {
    movp(rsi, context_operand);
  }
#ifdef DEBUG
  movp(context_operand, Immediate(0));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
  movp(c_entry_fp_operand, Immediate(0));
}
4572
4573
4574 // Compute the hash code from the untagged key. This must be kept in sync with
4575 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
4576 // code-stub-hydrogen.cc
// Computes the integer hash of the untagged key in |r0|, in place,
// seeded with the heap's hash seed. Clobbers |scratch|. The instruction
// sequence mirrors ComputeIntegerHash step for step and must stay in sync
// with it; the final mask keeps the hash a positive Smi-compatible value.
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiToInteger32(scratch, scratch);

  // Xor original key with a seed.
  xorl(r0, scratch);

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  movl(scratch, r0);
  notl(r0);
  shll(scratch, Immediate(15));
  addl(r0, scratch);
  // hash = hash ^ (hash >> 12);
  movl(scratch, r0);
  shrl(scratch, Immediate(12));
  xorl(r0, scratch);
  // hash = hash + (hash << 2);
  leal(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  movl(scratch, r0);
  shrl(scratch, Immediate(4));
  xorl(r0, scratch);
  // hash = hash * 2057;
  imull(r0, r0, Immediate(2057));
  // hash = hash ^ (hash >> 16);
  movl(scratch, r0);
  shrl(scratch, Immediate(16));
  xorl(r0, scratch);
  andl(r0, Immediate(0x3fffffff));
}
4611
// Loads the current allocation top (per |flags|' space selection) into
// |result|. If RESULT_CONTAINS_TOP is set, |result| already holds it and
// this only verifies that in debug builds. When |scratch| is valid, the
// top address itself is kept in |scratch| so UpdateAllocationTopHelper can
// store through it without recomputing.
void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    DCHECK(!scratch.is_valid());
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    Operand top_operand = ExternalOperand(allocation_top);
    cmpp(result, top_operand);
    Check(equal, kUnexpectedAllocationTop);
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available,
  // and keep address in scratch until call to UpdateAllocationTopHelper.
  if (scratch.is_valid()) {
    LoadAddress(scratch, allocation_top);
    movp(result, Operand(scratch, 0));
  } else {
    Load(result, allocation_top);
  }
}
4640
4641
// Ensures the allocation pointer in |result| is double-aligned. On 64-bit
// targets pointers are already double-sized, so only a debug check is
// emitted. On x32 (pointer half of double), misalignment is fixed by
// writing a one-pointer filler object and bumping |result|; pretenured,
// non-folded allocations must first re-check the limit before touching the
// filler word. Clobbers kScratchRegister on the alignment path.
void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
                                                 Register scratch,
                                                 Label* gc_required,
                                                 AllocationFlags flags) {
  if (kPointerSize == kDoubleSize) {
    if (FLAG_debug_code) {
      testl(result, Immediate(kDoubleAlignmentMask));
      Check(zero, kAllocationIsNotDoubleAligned);
    }
  } else {
    // Align the next allocation. Storing the filler map without checking top
    // is safe in new-space because the limit of the heap is aligned there.
    DCHECK(kPointerSize * 2 == kDoubleSize);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    // Make sure scratch is not clobbered by this function as it might be
    // used in UpdateAllocationTopHelper later.
    DCHECK(!scratch.is(kScratchRegister));
    Label aligned;
    testl(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if (((flags & ALLOCATION_FOLDED) == 0) && ((flags & PRETENURE) != 0)) {
      ExternalReference allocation_limit =
          AllocationUtils::GetAllocationLimitReference(isolate(), flags);
      cmpp(result, ExternalOperand(allocation_limit));
      j(above_equal, gc_required);
    }
    LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
    movp(Operand(result, 0), kScratchRegister);
    addp(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }
}
4674
4675
// Writes |result_end| back as the new allocation top. When |scratch| is
// valid it must already hold the top address (set up by
// LoadAllocationTopHelper); otherwise the external reference is resolved
// again. Debug code first checks the new top is object-aligned.
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    testp(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top.
  if (scratch.is_valid()) {
    // Scratch already contains address of allocation top.
    movp(Operand(scratch, 0), result_end);
  } else {
    Store(allocation_top, result_end);
  }
}
4695
4696
// Allocates |object_size| bytes in the space selected by |flags|, leaving
// a tagged pointer in |result|. Jumps to |gc_required| when the space is
// exhausted (or unconditionally when inline allocation is disabled).
// |result_end| (if valid) receives the untagged end of the new object;
// |scratch| (if valid) caches the allocation-top address. With no
// |result_end|, |result| itself is used as the bump register and is
// rewound to the tagged start afterwards.
void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  DCHECK(object_size <= kMaxRegularHeapObjectSize);
  DCHECK((flags & ALLOCATION_FOLDED) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        movl(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  Register top_reg = result_end.is_valid() ? result_end : result;

  if (!top_reg.is(result)) {
    movp(top_reg, result);
  }
  addp(top_reg, Immediate(object_size));
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(top_reg, limit_operand);
  j(above, gc_required);

  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
    // The top pointer is not updated for allocation folding dominators.
    UpdateAllocationTopHelper(top_reg, scratch, flags);
  }

  if (top_reg.is(result)) {
    // result currently holds the end; rewind to the start and tag in one
    // adjustment.
    subp(result, Immediate(object_size - kHeapObjectTag));
  } else {
    // Tag the result.
    DCHECK(kHeapObjectTag == 1);
    incp(result);
  }
}
4756
4757
// Allocates a variable-sized object: size = header_size +
// element_count * element_size, computed into |result_end| with a single
// lea, then delegated to the register-sized Allocate overload.
void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
  DCHECK((flags & ALLOCATION_FOLDED) == 0);
  leap(result_end, Operand(element_count, element_size, header_size));
  Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
4772
4773
// Allocates |object_size| bytes (size in a register) in the space selected
// by |flags|, leaving a tagged pointer in |result| and the untagged object
// end in |result_end|. Jumps to |gc_required| on exhaustion (or always,
// when inline allocation is disabled). |object_size| itself is preserved.
void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  DCHECK((flags & ALLOCATION_FOLDED) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      movl(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  if (!object_size.is(result_end)) {
    movp(result_end, object_size);
  }
  addp(result_end, result);
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(result_end, limit_operand);
  j(above, gc_required);

  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
    // The top pointer is not updated for allocation folding dominators.
    UpdateAllocationTopHelper(result_end, scratch, flags);
  }

  // Tag the result.
  addp(result, Immediate(kHeapObjectTag));
}
4822
// Allocates object_size bytes without a limit check or GC fallback: the
// caller must have ensured sufficient space (no limit compare is emitted
// here).  Bumps the allocation top and leaves the tagged address in result
// and the untagged end address in result_end.
void MacroAssembler::FastAllocate(int object_size, Register result,
                                  Register result_end, AllocationFlags flags) {
  DCHECK(!result.is(result_end));
  // Load address of new object into result.
  LoadAllocationTopHelper(result, no_reg, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
  }

  // result_end = result + object_size.
  leap(result_end, Operand(result, object_size));

  UpdateAllocationTopHelper(result_end, no_reg, flags);

  // Tag the result.
  addp(result, Immediate(kHeapObjectTag));
}
4839
// Register-sized variant of FastAllocate above: allocates object_size
// (register) bytes with no limit check or GC fallback.  The caller must
// have ensured sufficient space.  result receives the tagged address,
// result_end the untagged end address.
void MacroAssembler::FastAllocate(Register object_size, Register result,
                                  Register result_end, AllocationFlags flags) {
  DCHECK(!result.is(result_end));
  // Load address of new object into result.
  LoadAllocationTopHelper(result, no_reg, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
  }

  // result_end = result + object_size.
  leap(result_end, Operand(result, object_size, times_1, 0));

  UpdateAllocationTopHelper(result_end, no_reg, flags);

  // Tag the result.
  addp(result, Immediate(kHeapObjectTag));
}
4856
AllocateHeapNumber(Register result,Register scratch,Label * gc_required,MutableMode mode)4857 void MacroAssembler::AllocateHeapNumber(Register result,
4858 Register scratch,
4859 Label* gc_required,
4860 MutableMode mode) {
4861 // Allocate heap number in new space.
4862 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required,
4863 NO_ALLOCATION_FLAGS);
4864
4865 Heap::RootListIndex map_index = mode == MUTABLE
4866 ? Heap::kMutableHeapNumberMapRootIndex
4867 : Heap::kHeapNumberMapRootIndex;
4868
4869 // Set the map.
4870 LoadRoot(kScratchRegister, map_index);
4871 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4872 }
4873
// Allocates a JSValue wrapper in new space and initializes all of its
// fields: map (the constructor's initial map), empty properties and
// elements, and the wrapped value.  Jumps to gc_required on allocation
// failure.  scratch is clobbered.
void MacroAssembler::AllocateJSValue(Register result, Register constructor,
                                     Register value, Register scratch,
                                     Label* gc_required) {
  DCHECK(!result.is(constructor));
  DCHECK(!result.is(scratch));
  DCHECK(!result.is(value));

  // Allocate JSValue in new space.
  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
           NO_ALLOCATION_FLAGS);

  // Initialize the JSValue.
  LoadGlobalFunctionInitialMap(constructor, scratch);
  movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
  LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
  movp(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
  movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
  movp(FieldOperand(result, JSValue::kValueOffset), value);
  // All four fields above must exactly cover the object.
  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
4894
// Stores filler into every pointer-sized slot in [current_address,
// end_address).  current_address is advanced to end_address; filler and
// end_address are unchanged.  Emits a test-at-bottom loop so an empty
// range stores nothing.
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                Register end_address,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry, Label::kNear);
  bind(&loop);
  movp(Operand(current_address, 0), filler);
  addp(current_address, Immediate(kPointerSize));
  bind(&entry);
  cmpp(current_address, end_address);
  j(below, &loop, Label::kNear);
}
4907
4908
// Loads into dst the context that is context_chain_length hops up the
// previous-context chain from the current context in rsi (dst = rsi when
// the length is zero).
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in rsi).
    movp(dst, rsi);
  }

  // We should not have found a with context by walking the context
  // chain (i.e., the static scope chain and runtime context chain do
  // not agree). A variable occurring in such a scope should have
  // slot type LOOKUP and not CONTEXT.
  if (emit_debug_code()) {
    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
                Heap::kWithContextMapRootIndex);
    Check(not_equal, kVariableResolvedToWithContext);
  }
}
4933
// Number of C-function arguments passed in registers by the native calling
// convention: four on Windows x64, six in the AMD64 ABI (Linux/Mac).
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif
4939
4940
// Loads the native context slot at the given index into dst.  dst is first
// used as a scratch to hold the native context itself.
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  movp(dst, NativeContextOperand());
  movp(dst, ContextOperand(dst, index));
}
4945
4946
// Loads the initial map of the given global function into map.  In debug
// code, aborts if the loaded value is not actually a map (global functions
// are expected to always have an initial map).
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map. The global functions all have initial maps.
  movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    // A map's own map is the meta map; anything else is not a map.
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}
4960
4961
ArgumentStackSlotsForCFunctionCall(int num_arguments)4962 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4963 // On Windows 64 stack slots are reserved by the caller for all arguments
4964 // including the ones passed in registers, and space is always allocated for
4965 // the four register arguments even if the function takes fewer than four
4966 // arguments.
4967 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4968 // and the caller does not reserve stack slots for them.
4969 DCHECK(num_arguments >= 0);
4970 #ifdef _WIN64
4971 const int kMinimumStackSlots = kRegisterPassedArguments;
4972 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4973 return num_arguments;
4974 #else
4975 if (num_arguments < kRegisterPassedArguments) return 0;
4976 return num_arguments - kRegisterPassedArguments;
4977 #endif
4978 }
4979
4980
// Debug check for a sequential-string character store: verifies that
// string is a heap object whose representation/encoding bits match
// encoding_mask and that index (untagged on entry) is within the string's
// length.  Aborts on violation.  value is saved/restored around its use
// as a scratch; index is restored to its untagged form on exit.
void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object);
  Abort(kNonObject);
  bind(&is_object);

  // Use value as a scratch to load the map's instance type bits.
  Push(value);
  movp(value, FieldOperand(string, HeapObject::kMapOffset));
  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));

  andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmpp(value, Immediate(encoding_mask));
  Pop(value);
  Check(equal, kUnexpectedStringType);

  // The index is assumed to be untagged coming in, tag it to compare with the
  // string length without using a temp register, it is restored at the end of
  // this function.
  Integer32ToSmi(index, index);
  SmiCompare(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  // The index must also be non-negative.
  SmiCompare(index, Smi::kZero);
  Check(greater_equal, kIndexIsNegative);

  // Restore the index
  SmiToInteger32(index, index);
}
5012
5013
// Aligns rsp for a C call with num_arguments arguments, reserving the
// ABI-required argument slots plus one extra slot where the original rsp
// is saved so that CallCFunction can restore it afterwards.
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  DCHECK(frame_alignment != 0);
  DCHECK(num_arguments >= 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movp(kScratchRegister, rsp);
  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
  // Alignment is a power of two, so this rounds rsp down to a multiple.
  andp(rsp, Immediate(-frame_alignment));
  // Stash the original rsp in the slot just above the argument slots.
  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
5028
5029
// Calls the C function at the given external reference, using rax as a
// scratch register to hold the function's address.
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}
5035
5036
// Calls the C function whose address is in the given register.  Must be
// paired with a preceding PrepareCallCFunction using the same argument
// count: the rsp saved there is reloaded to undo the stack alignment.
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  DCHECK(base::OS::ActivationFrameAlignment() != 0);
  DCHECK(num_arguments >= 0);
  // Restore the pre-call rsp saved above the argument slots.
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
5051
5052
5053 #ifdef DEBUG
AreAliased(Register reg1,Register reg2,Register reg3,Register reg4,Register reg5,Register reg6,Register reg7,Register reg8)5054 bool AreAliased(Register reg1,
5055 Register reg2,
5056 Register reg3,
5057 Register reg4,
5058 Register reg5,
5059 Register reg6,
5060 Register reg7,
5061 Register reg8) {
5062 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
5063 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5064 reg7.is_valid() + reg8.is_valid();
5065
5066 RegList regs = 0;
5067 if (reg1.is_valid()) regs |= reg1.bit();
5068 if (reg2.is_valid()) regs |= reg2.bit();
5069 if (reg3.is_valid()) regs |= reg3.bit();
5070 if (reg4.is_valid()) regs |= reg4.bit();
5071 if (reg5.is_valid()) regs |= reg5.bit();
5072 if (reg6.is_valid()) regs |= reg6.bit();
5073 if (reg7.is_valid()) regs |= reg7.bit();
5074 if (reg8.is_valid()) regs |= reg8.bit();
5075 int n_of_non_aliasing_regs = NumRegs(regs);
5076
5077 return n_of_valid_regs != n_of_non_aliasing_regs;
5078 }
5079 #endif
5080
5081
// Sets up a macro assembler that emits directly over the existing code at
// address (size bytes); the destructor flushes the instruction cache to
// publish the patch.
CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
    : address_(address),
      size_(size),
      masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap on order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
5091
5092
CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  Assembler::FlushICache(masm_.isolate(), address_, size_);

  // Check that the code was patched as expected: exactly size_ bytes were
  // emitted and the reloc-info writer position did not move.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
5101
5102
// Tests the given flag bits in the MemoryChunk header of the page that
// contains object and jumps to condition_met if the test matches cc
// (zero or not_zero).  scratch is clobbered with the page start address.
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    // Mask the address down to the page start in place.
    andp(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movp(scratch, Immediate(~Page::kPageAlignmentMask));
    andp(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    // A mask that fits in one byte allows the shorter byte-sized test.
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}
5125
5126
// Jumps to on_black if both of the object's mark bits are set (the black
// pattern "11").  Clobbers rcx and both scratch registers.
void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 1 at a position of the second bit. All other positions are zero.
  movp(rcx, mask_scratch);
  // rcx = bitmap cell & mask; it equals the mask only if both bits are set.
  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpp(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}
5144
5145
// Computes the marking-bitmap cell address for addr_reg into bitmap_reg
// and a two-bit in-cell mask (pattern "11" shifted to the object's bit
// position) into mask_reg.  Clobbers rcx.
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  // bitmap_reg = start of the page containing addr_reg.
  movp(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  // rcx = byte offset of the bitmap cell within the page.
  movp(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  andp(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addp(bitmap_reg, rcx);
  // rcx = bit index of the object within its bitmap cell.
  movp(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  // mask_reg = 0b11 << rcx (shift count is taken from cl).
  movl(mask_reg, Immediate(3));
  shlp_cl(mask_reg);
}
5168
5169
// Jumps to value_is_white if the object's mark bits show the white pattern
// ("00").  Clobbers rcx and both scratch registers.
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register mask_scratch, Label* value_is_white,
                                 Label::Distance distance) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(zero, value_is_white, distance);
}
5187
5188
// Walks the prototype chain of the object in rax and jumps to call_runtime
// unless every map has an initialized enum cache (empty for all objects
// but the receiver) and every object's elements are empty (or the empty
// slow element dictionary).  Clobbers rbx, rcx, rdx, r8 and
// kScratchRegister.
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movp(rcx, rax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  // Load the map of the current prototype-chain object.
  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::kZero);
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  cmpp(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
  cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  bind(&no_elements);
  // Advance to the prototype; stop when we hit null at the end of the chain.
  movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  CompareRoot(rcx, Heap::kNullValueRootIndex);
  j(not_equal, &next);
}
5233
5234
// Checks whether an AllocationMemento directly follows the JSArray in
// receiver_reg.  Falls through with the equal condition set iff the word
// after the array is the allocation-memento map; jumps to no_memento_found
// when a memento cannot exist (object not in new space, or the memento
// slot is past a page boundary / past the allocation top).  Clobbers
// scratch_reg.
void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  Label map_check;
  Label top_check;
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  // Untagged offset of the would-be memento's map slot.
  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
  const int kMementoLastWordOffset =
      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;

  // Bail out if the object is not in new space.
  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
  // If the object is in new space, we need to check whether it is on the same
  // page as the current top.
  leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
  xorp(scratch_reg, ExternalOperand(new_space_allocation_top));
  testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
  j(zero, &top_check);
  // The object is on a different page than allocation top. Bail out if the
  // object sits on the page boundary as no memento can follow and we cannot
  // touch the memory following it.
  leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
  xorp(scratch_reg, receiver_reg);
  testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
  j(not_zero, no_memento_found);
  // Continue with the actual map check.
  jmp(&map_check);
  // If top is on the same page as the current object, we need to check whether
  // we are below top.
  bind(&top_check);
  leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
  cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater_equal, no_memento_found);
  // Memento map check.
  bind(&map_check);
  CompareRoot(MemOperand(receiver_reg, kMementoMapOffset),
              Heap::kAllocationMementoMapRootIndex);
}
5275
// Computes the truncated signed quotient dividend / divisor via
// multiplication by a precomputed "magic number" (Hacker's Delight,
// chapter 10) instead of an idiv.  The quotient is left in rdx; rax is
// clobbered, the dividend register is preserved.
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(rax));
  DCHECK(!dividend.is(rdx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  movl(rax, Immediate(mag.multiplier));
  // edx:eax = eax * dividend; the high half in rdx approximates the quotient.
  imull(dividend);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  // Correction terms for when the multiplier had to be taken negative
  // (or positive) relative to the divisor's sign.
  if (divisor > 0 && neg) addl(rdx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
  if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
  // Add the dividend's sign bit to round the quotient toward zero.
  movl(rax, dividend);
  shrl(rax, Immediate(31));
  addl(rdx, rax);
}
5291
5292
5293 } // namespace internal
5294 } // namespace v8
5295
5296 #endif // V8_TARGET_ARCH_X64
5297