1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_X64
6
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/debug/debug.h"
12 #include "src/heap/heap.h"
13 #include "src/register-configuration.h"
14 #include "src/x64/assembler-x64.h"
15 #include "src/x64/macro-assembler-x64.h"
16
17 namespace v8 {
18 namespace internal {
19
20 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
21 CodeObjectRequired create_code_object)
22 : Assembler(arg_isolate, buffer, size),
23 generating_stub_(false),
24 has_frame_(false),
25 root_array_available_(true) {
26 if (create_code_object == CodeObjectRequired::kYes) {
27 code_object_ =
28 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
29 }
30 }
31
32
33 static const int64_t kInvalidRootRegisterDelta = -1;
34
35
36 int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
37 if (predictable_code_size() &&
38 (other.address() < reinterpret_cast<Address>(isolate()) ||
39 other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
40 return kInvalidRootRegisterDelta;
41 }
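  // kRootRegister holds roots_array_start + kRootRegisterBias at run time
  // (see LoadRoot below), so the delta is computed against that biased base.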
42 Address roots_register_value = kRootRegisterBias +
43 reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
44
45 int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
46 if (kPointerSize == kInt64Size) {
47 delta = other.address() - roots_register_value;
48 } else {
49 // For x32, zero extend the address to 64-bit and calculate the delta.
50 uint64_t o = static_cast<uint32_t>(
51 reinterpret_cast<intptr_t>(other.address()));
52 uint64_t r = static_cast<uint32_t>(
53 reinterpret_cast<intptr_t>(roots_register_value));
54 delta = o - r;
55 }
56 return delta;
57 }
58
59
60 Operand MacroAssembler::ExternalOperand(ExternalReference target,
61 Register scratch) {
62 if (root_array_available_ && !serializer_enabled()) {
63 int64_t delta = RootRegisterDelta(target);
64 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
65 return Operand(kRootRegister, static_cast<int32_t>(delta));
66 }
67 }
68 Move(scratch, target);
69 return Operand(scratch, 0);
70 }
71
72
73 void MacroAssembler::Load(Register destination, ExternalReference source) {
74 if (root_array_available_ && !serializer_enabled()) {
75 int64_t delta = RootRegisterDelta(source);
76 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
77 movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
78 return;
79 }
80 }
81 // Safe code.
82 if (destination.is(rax)) {
83 load_rax(source);
84 } else {
85 Move(kScratchRegister, source);
86 movp(destination, Operand(kScratchRegister, 0));
87 }
88 }
89
90
91 void MacroAssembler::Store(ExternalReference destination, Register source) {
92 if (root_array_available_ && !serializer_enabled()) {
93 int64_t delta = RootRegisterDelta(destination);
94 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
95 movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
96 return;
97 }
98 }
99 // Safe code.
100 if (source.is(rax)) {
101 store_rax(destination);
102 } else {
103 Move(kScratchRegister, destination);
104 movp(Operand(kScratchRegister, 0), source);
105 }
106 }
107
108
109 void MacroAssembler::LoadAddress(Register destination,
110 ExternalReference source) {
111 if (root_array_available_ && !serializer_enabled()) {
112 int64_t delta = RootRegisterDelta(source);
113 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
114 leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
115 return;
116 }
117 }
118 // Safe code.
119 Move(destination, source);
120 }
121
122
123 int MacroAssembler::LoadAddressSize(ExternalReference source) {
124 if (root_array_available_ && !serializer_enabled()) {
125 // This calculation depends on the internals of LoadAddress.
126 // Its correctness is ensured by the asserts in the Call
127 // instruction below.
128 int64_t delta = RootRegisterDelta(source);
129 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
130 // Operand is leap(scratch, Operand(kRootRegister, delta));
131 // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
132 int size = 4;
133 if (!is_int8(static_cast<int32_t>(delta))) {
134 size += 3; // Need full four-byte displacement in lea.
135 }
136 return size;
137 }
138 }
139 // Size of movp(destination, src);
140 return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
141 }
142
143
144 void MacroAssembler::PushAddress(ExternalReference source) {
145 int64_t address = reinterpret_cast<int64_t>(source.address());
146 if (is_int32(address) && !serializer_enabled()) {
147 if (emit_debug_code()) {
148 Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
149 }
150 Push(Immediate(static_cast<int32_t>(address)));
151 return;
152 }
153 LoadAddress(kScratchRegister, source);
154 Push(kScratchRegister);
155 }
156
157
158 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
159 DCHECK(root_array_available_);
160 movp(destination, Operand(kRootRegister,
161 (index << kPointerSizeLog2) - kRootRegisterBias));
162 }
163
164
165 void MacroAssembler::LoadRootIndexed(Register destination,
166 Register variable_offset,
167 int fixed_offset) {
168 DCHECK(root_array_available_);
169 movp(destination,
170 Operand(kRootRegister,
171 variable_offset, times_pointer_size,
172 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
173 }
174
175
176 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
177 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
178 DCHECK(root_array_available_);
179 movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
180 source);
181 }
182
183
184 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
185 DCHECK(root_array_available_);
186 Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
187 }
188
189
190 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
191 DCHECK(root_array_available_);
192 cmpp(with, Operand(kRootRegister,
193 (index << kPointerSizeLog2) - kRootRegisterBias));
194 }
195
196
197 void MacroAssembler::CompareRoot(const Operand& with,
198 Heap::RootListIndex index) {
199 DCHECK(root_array_available_);
200 DCHECK(!with.AddressUsesRegister(kScratchRegister));
201 LoadRoot(kScratchRegister, index);
202 cmpp(with, kScratchRegister);
203 }
204
205
206 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
207 Register addr,
208 Register scratch,
209 SaveFPRegsMode save_fp,
210 RememberedSetFinalAction and_then) {
211 if (emit_debug_code()) {
212 Label ok;
213 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
214 int3();
215 bind(&ok);
216 }
217 // Load store buffer top.
218 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
219 // Store pointer to buffer.
220 movp(Operand(scratch, 0), addr);
221 // Increment buffer top.
222 addp(scratch, Immediate(kPointerSize));
223 // Write back new top of buffer.
224 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
225 // Call stub on end of buffer.
226 Label done;
227 // Check for end of buffer.
228 testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
229 if (and_then == kReturnAtEnd) {
230 Label buffer_overflowed;
231 j(not_equal, &buffer_overflowed, Label::kNear);
232 ret(0);
233 bind(&buffer_overflowed);
234 } else {
235 DCHECK(and_then == kFallThroughAtEnd);
236 j(equal, &done, Label::kNear);
237 }
238 StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
239 CallStub(&store_buffer_overflow);
240 if (and_then == kReturnAtEnd) {
241 ret(0);
242 } else {
243 DCHECK(and_then == kFallThroughAtEnd);
244 bind(&done);
245 }
246 }
247
248
249 void MacroAssembler::InNewSpace(Register object,
250 Register scratch,
251 Condition cc,
252 Label* branch,
253 Label::Distance distance) {
254 if (serializer_enabled()) {
255 // Can't do arithmetic on external references if it might get serialized.
256 // The mask isn't really an address. We load it as an external reference in
257 // case the size of the new space is different between the snapshot maker
258 // and the running system.
259 if (scratch.is(object)) {
260 Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
261 andp(scratch, kScratchRegister);
262 } else {
263 Move(scratch, ExternalReference::new_space_mask(isolate()));
264 andp(scratch, object);
265 }
266 Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
267 cmpp(scratch, kScratchRegister);
268 j(cc, branch, distance);
269 } else {
270 DCHECK(kPointerSize == kInt64Size
271 ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
272 : kPointerSize == kInt32Size);
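  // scratch = (object - new_space_start) & mask; the andp leaves the flags
  // set, and the result is zero exactly when the object lies in new space.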
273 intptr_t new_space_start =
274 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
275 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
276 Assembler::RelocInfoNone());
277 if (scratch.is(object)) {
278 addp(scratch, kScratchRegister);
279 } else {
280 leap(scratch, Operand(object, kScratchRegister, times_1, 0));
281 }
282 andp(scratch,
283 Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
284 j(cc, branch, distance);
285 }
286 }
287
288
289 void MacroAssembler::RecordWriteField(
290 Register object,
291 int offset,
292 Register value,
293 Register dst,
294 SaveFPRegsMode save_fp,
295 RememberedSetAction remembered_set_action,
296 SmiCheck smi_check,
297 PointersToHereCheck pointers_to_here_check_for_value) {
298 // First, check if a write barrier is even needed. The tests below
299 // catch stores of Smis.
300 Label done;
301
302 // Skip barrier if writing a smi.
303 if (smi_check == INLINE_SMI_CHECK) {
304 JumpIfSmi(value, &done);
305 }
306
307 // Although the object register is tagged, the offset is relative to the start
308 // of the object, so the offset must be a multiple of kPointerSize.
309 DCHECK(IsAligned(offset, kPointerSize));
310
311 leap(dst, FieldOperand(object, offset));
312 if (emit_debug_code()) {
313 Label ok;
314 testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
315 j(zero, &ok, Label::kNear);
316 int3();
317 bind(&ok);
318 }
319
320 RecordWrite(object, dst, value, save_fp, remembered_set_action,
321 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
322
323 bind(&done);
324
325 // Clobber clobbered input registers when running with the debug-code flag
326 // turned on to provoke errors.
327 if (emit_debug_code()) {
328 Move(value, kZapValue, Assembler::RelocInfoNone());
329 Move(dst, kZapValue, Assembler::RelocInfoNone());
330 }
331 }
332
333
334 void MacroAssembler::RecordWriteArray(
335 Register object,
336 Register value,
337 Register index,
338 SaveFPRegsMode save_fp,
339 RememberedSetAction remembered_set_action,
340 SmiCheck smi_check,
341 PointersToHereCheck pointers_to_here_check_for_value) {
342 // First, check if a write barrier is even needed. The tests below
343 // catch stores of Smis.
344 Label done;
345
346 // Skip barrier if writing a smi.
347 if (smi_check == INLINE_SMI_CHECK) {
348 JumpIfSmi(value, &done);
349 }
350
351 // Array access: calculate the destination address. Index is not a smi.
352 Register dst = index;
353 leap(dst, Operand(object, index, times_pointer_size,
354 FixedArray::kHeaderSize - kHeapObjectTag));
355
356 RecordWrite(object, dst, value, save_fp, remembered_set_action,
357 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
358
359 bind(&done);
360
361 // Clobber clobbered input registers when running with the debug-code flag
362 // turned on to provoke errors.
363 if (emit_debug_code()) {
364 Move(value, kZapValue, Assembler::RelocInfoNone());
365 Move(index, kZapValue, Assembler::RelocInfoNone());
366 }
367 }
368
369
370 void MacroAssembler::RecordWriteForMap(Register object,
371 Register map,
372 Register dst,
373 SaveFPRegsMode fp_mode) {
374 DCHECK(!object.is(kScratchRegister));
375 DCHECK(!object.is(map));
376 DCHECK(!object.is(dst));
377 DCHECK(!map.is(dst));
378 AssertNotSmi(object);
379
380 if (emit_debug_code()) {
381 Label ok;
382 if (map.is(kScratchRegister)) pushq(map);
383 CompareMap(map, isolate()->factory()->meta_map());
384 if (map.is(kScratchRegister)) popq(map);
385 j(equal, &ok, Label::kNear);
386 int3();
387 bind(&ok);
388 }
389
390 if (!FLAG_incremental_marking) {
391 return;
392 }
393
394 if (emit_debug_code()) {
395 Label ok;
396 if (map.is(kScratchRegister)) pushq(map);
397 cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
398 if (map.is(kScratchRegister)) popq(map);
399 j(equal, &ok, Label::kNear);
400 int3();
401 bind(&ok);
402 }
403
404 // Compute the address.
405 leap(dst, FieldOperand(object, HeapObject::kMapOffset));
406
407 // First, check if a write barrier is even needed. The tests below
408 // catch stores of smis and stores into the young generation.
409 Label done;
410
411 // A single check of the map's page's interesting flag suffices, since it is
412 // only set during incremental collection, and then it's also guaranteed that
413 // the from object's page's interesting flag is also set. This optimization
414 // relies on the fact that maps can never be in new space.
415 CheckPageFlag(map,
416 map, // Used as scratch.
417 MemoryChunk::kPointersToHereAreInterestingMask,
418 zero,
419 &done,
420 Label::kNear);
421
422 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
423 fp_mode);
424 CallStub(&stub);
425
426 bind(&done);
427
428 // Count number of write barriers in generated code.
429 isolate()->counters()->write_barriers_static()->Increment();
430 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
431
432 // Clobber clobbered registers when running with the debug-code flag
433 // turned on to provoke errors.
434 if (emit_debug_code()) {
435 Move(dst, kZapValue, Assembler::RelocInfoNone());
436 Move(map, kZapValue, Assembler::RelocInfoNone());
437 }
438 }
439
440
441 void MacroAssembler::RecordWrite(
442 Register object,
443 Register address,
444 Register value,
445 SaveFPRegsMode fp_mode,
446 RememberedSetAction remembered_set_action,
447 SmiCheck smi_check,
448 PointersToHereCheck pointers_to_here_check_for_value) {
449 DCHECK(!object.is(value));
450 DCHECK(!object.is(address));
451 DCHECK(!value.is(address));
452 AssertNotSmi(object);
453
454 if (remembered_set_action == OMIT_REMEMBERED_SET &&
455 !FLAG_incremental_marking) {
456 return;
457 }
458
459 if (emit_debug_code()) {
460 Label ok;
461 cmpp(value, Operand(address, 0));
462 j(equal, &ok, Label::kNear);
463 int3();
464 bind(&ok);
465 }
466
467 // First, check if a write barrier is even needed. The tests below
468 // catch stores of smis and stores into the young generation.
469 Label done;
470
471 if (smi_check == INLINE_SMI_CHECK) {
472 // Skip barrier if writing a smi.
473 JumpIfSmi(value, &done);
474 }
475
476 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
477 CheckPageFlag(value,
478 value, // Used as scratch.
479 MemoryChunk::kPointersToHereAreInterestingMask,
480 zero,
481 &done,
482 Label::kNear);
483 }
484
485 CheckPageFlag(object,
486 value, // Used as scratch.
487 MemoryChunk::kPointersFromHereAreInterestingMask,
488 zero,
489 &done,
490 Label::kNear);
491
492 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
493 fp_mode);
494 CallStub(&stub);
495
496 bind(&done);
497
498 // Count number of write barriers in generated code.
499 isolate()->counters()->write_barriers_static()->Increment();
500 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
501
502 // Clobber clobbered registers when running with the debug-code flag
503 // turned on to provoke errors.
504 if (emit_debug_code()) {
505 Move(address, kZapValue, Assembler::RelocInfoNone());
506 Move(value, kZapValue, Assembler::RelocInfoNone());
507 }
508 }
509
510
511 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
512 if (emit_debug_code()) Check(cc, reason);
513 }
514
515
516 void MacroAssembler::AssertFastElements(Register elements) {
517 if (emit_debug_code()) {
518 Label ok;
519 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
520 Heap::kFixedArrayMapRootIndex);
521 j(equal, &ok, Label::kNear);
522 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
523 Heap::kFixedDoubleArrayMapRootIndex);
524 j(equal, &ok, Label::kNear);
525 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
526 Heap::kFixedCOWArrayMapRootIndex);
527 j(equal, &ok, Label::kNear);
528 Abort(kJSObjectWithFastElementsMapHasSlowElements);
529 bind(&ok);
530 }
531 }
532
533
534 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
535 Label L;
536 j(cc, &L, Label::kNear);
537 Abort(reason);
538 // Control will not return here.
539 bind(&L);
540 }
541
542
543 void MacroAssembler::CheckStackAlignment() {
544 int frame_alignment = base::OS::ActivationFrameAlignment();
545 int frame_alignment_mask = frame_alignment - 1;
546 if (frame_alignment > kPointerSize) {
547 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
548 Label alignment_as_expected;
549 testp(rsp, Immediate(frame_alignment_mask));
550 j(zero, &alignment_as_expected, Label::kNear);
551 // Abort if stack is not aligned.
552 int3();
553 bind(&alignment_as_expected);
554 }
555 }
556
557
558 void MacroAssembler::NegativeZeroTest(Register result,
559 Register op,
560 Label* then_label) {
561 Label ok;
562 testl(result, result);
563 j(not_zero, &ok, Label::kNear);
564 testl(op, op);
565 j(sign, then_label);
566 bind(&ok);
567 }
568
569
570 void MacroAssembler::Abort(BailoutReason reason) {
571 #ifdef DEBUG
572 const char* msg = GetBailoutReason(reason);
573 if (msg != NULL) {
574 RecordComment("Abort message: ");
575 RecordComment(msg);
576 }
577
578 if (FLAG_trap_on_abort) {
579 int3();
580 return;
581 }
582 #endif
583
584 Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
585 Assembler::RelocInfoNone());
586 Push(kScratchRegister);
587
588 if (!has_frame_) {
589 // We don't actually want to generate a pile of code for this, so just
590 // claim there is a stack frame, without generating one.
591 FrameScope scope(this, StackFrame::NONE);
592 CallRuntime(Runtime::kAbort, 1);
593 } else {
594 CallRuntime(Runtime::kAbort, 1);
595 }
596 // Control will not return here.
597 int3();
598 }
599
600
601 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
602 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
603 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
604 }
605
606
607 void MacroAssembler::TailCallStub(CodeStub* stub) {
608 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
609 }
610
611
612 void MacroAssembler::StubReturn(int argc) {
613 DCHECK(argc >= 1 && generating_stub());
614 ret((argc - 1) * kPointerSize);
615 }
616
617
618 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
619 return has_frame_ || !stub->SometimesSetsUpAFrame();
620 }
621
622
623 void MacroAssembler::IndexFromHash(Register hash, Register index) {
624 // The assert checks that the constants for the maximum number of digits
625 // for an array index cached in the hash field and the number of bits
626 // reserved for it do not conflict.
627 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
628 (1 << String::kArrayIndexValueBits));
629 if (!hash.is(index)) {
630 movl(index, hash);
631 }
632 DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
633 }
634
635
636 void MacroAssembler::CallRuntime(const Runtime::Function* f,
637 int num_arguments,
638 SaveFPRegsMode save_doubles) {
639 // If the expected number of arguments of the runtime function is
640 // constant, we check that the actual number of arguments matches the
641 // expectation.
642 CHECK(f->nargs < 0 || f->nargs == num_arguments);
643
644 // TODO(1236192): Most runtime routines don't need the number of
645 // arguments passed in because it is constant. At some point we
646 // should remove this need and make the runtime routine entry code
647 // smarter.
648 Set(rax, num_arguments);
649 LoadAddress(rbx, ExternalReference(f, isolate()));
650 CEntryStub ces(isolate(), f->result_size, save_doubles);
651 CallStub(&ces);
652 }
653
654
655 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
656 int num_arguments) {
657 Set(rax, num_arguments);
658 LoadAddress(rbx, ext);
659
660 CEntryStub stub(isolate(), 1);
661 CallStub(&stub);
662 }
663
664
665 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
666 // ----------- S t a t e -------------
667 // -- rsp[0] : return address
668 // -- rsp[8] : argument num_arguments - 1
669 // ...
670 // -- rsp[8 * num_arguments] : argument 0 (receiver)
671 //
672 // For runtime functions with variable arguments:
673 // -- rax : number of arguments
674 // -----------------------------------
675
676 const Runtime::Function* function = Runtime::FunctionForId(fid);
677 DCHECK_EQ(1, function->result_size);
678 if (function->nargs >= 0) {
679 Set(rax, function->nargs);
680 }
681 JumpToExternalReference(ExternalReference(fid, isolate()));
682 }
683
684
685 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
686 // Set the entry point and jump to the C entry runtime stub.
687 LoadAddress(rbx, ext);
688 CEntryStub ces(isolate(), 1);
689 jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
690 }
691
692
693 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
694 const CallWrapper& call_wrapper) {
695 // You can't call a builtin without a valid frame.
696 DCHECK(flag == JUMP_FUNCTION || has_frame());
697
698 // Fake a parameter count to avoid emitting code to do the check.
699 ParameterCount expected(0);
700 LoadNativeContextSlot(native_context_index, rdi);
701 InvokeFunctionCode(rdi, no_reg, expected, expected, flag, call_wrapper);
702 }
703
704
705 #define REG(Name) \
706 { Register::kCode_##Name }
707
708 static const Register saved_regs[] = {
709 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
710 REG(r9), REG(r10), REG(r11)
711 };
712
713 #undef REG
714
715 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
716
717
718 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
719 Register exclusion1,
720 Register exclusion2,
721 Register exclusion3) {
722 // We don't allow a GC during a store buffer overflow so there is no need to
723 // store the registers in any particular way, but we do have to store and
724 // restore them.
725 for (int i = 0; i < kNumberOfSavedRegs; i++) {
726 Register reg = saved_regs[i];
727 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
728 pushq(reg);
729 }
730 }
731 // r12 to r15 are callee-saved on all platforms, so they are not pushed here.
732 if (fp_mode == kSaveFPRegs) {
733 subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
734 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
735 XMMRegister reg = XMMRegister::from_code(i);
736 Movsd(Operand(rsp, i * kDoubleSize), reg);
737 }
738 }
739 }
740
741
742 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
743 Register exclusion1,
744 Register exclusion2,
745 Register exclusion3) {
746 if (fp_mode == kSaveFPRegs) {
747 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
748 XMMRegister reg = XMMRegister::from_code(i);
749 Movsd(reg, Operand(rsp, i * kDoubleSize));
750 }
751 addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
752 }
753 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
754 Register reg = saved_regs[i];
755 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
756 popq(reg);
757 }
758 }
759 }
760
761
762 void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
763 if (CpuFeatures::IsSupported(AVX)) {
764 CpuFeatureScope scope(this, AVX);
765 vcvtss2sd(dst, src, src);
766 } else {
767 cvtss2sd(dst, src);
768 }
769 }
770
771
772 void MacroAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
773 if (CpuFeatures::IsSupported(AVX)) {
774 CpuFeatureScope scope(this, AVX);
775 vcvtss2sd(dst, dst, src);
776 } else {
777 cvtss2sd(dst, src);
778 }
779 }
780
781
782 void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
783 if (CpuFeatures::IsSupported(AVX)) {
784 CpuFeatureScope scope(this, AVX);
785 vcvtsd2ss(dst, src, src);
786 } else {
787 cvtsd2ss(dst, src);
788 }
789 }
790
791
792 void MacroAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
793 if (CpuFeatures::IsSupported(AVX)) {
794 CpuFeatureScope scope(this, AVX);
795 vcvtsd2ss(dst, dst, src);
796 } else {
797 cvtsd2ss(dst, src);
798 }
799 }
800
801
802 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
803 if (CpuFeatures::IsSupported(AVX)) {
804 CpuFeatureScope scope(this, AVX);
805 vxorpd(dst, dst, dst);
806 vcvtlsi2sd(dst, dst, src);
807 } else {
808 xorpd(dst, dst);
809 cvtlsi2sd(dst, src);
810 }
811 }
812
813
814 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
815 if (CpuFeatures::IsSupported(AVX)) {
816 CpuFeatureScope scope(this, AVX);
817 vxorpd(dst, dst, dst);
818 vcvtlsi2sd(dst, dst, src);
819 } else {
820 xorpd(dst, dst);
821 cvtlsi2sd(dst, src);
822 }
823 }
824
825
826 void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
827 if (CpuFeatures::IsSupported(AVX)) {
828 CpuFeatureScope scope(this, AVX);
829 vxorps(dst, dst, dst);
830 vcvtqsi2ss(dst, dst, src);
831 } else {
832 xorps(dst, dst);
833 cvtqsi2ss(dst, src);
834 }
835 }
836
837
838 void MacroAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
839 if (CpuFeatures::IsSupported(AVX)) {
840 CpuFeatureScope scope(this, AVX);
841 vxorps(dst, dst, dst);
842 vcvtqsi2ss(dst, dst, src);
843 } else {
844 xorps(dst, dst);
845 cvtqsi2ss(dst, src);
846 }
847 }
848
849
850 void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
851 if (CpuFeatures::IsSupported(AVX)) {
852 CpuFeatureScope scope(this, AVX);
853 vxorpd(dst, dst, dst);
854 vcvtqsi2sd(dst, dst, src);
855 } else {
856 xorpd(dst, dst);
857 cvtqsi2sd(dst, src);
858 }
859 }
860
861
862 void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
863 if (CpuFeatures::IsSupported(AVX)) {
864 CpuFeatureScope scope(this, AVX);
865 vxorpd(dst, dst, dst);
866 vcvtqsi2sd(dst, dst, src);
867 } else {
868 xorpd(dst, dst);
869 cvtqsi2sd(dst, src);
870 }
871 }
872
873
874 void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
875 Label msb_set_src;
876 Label jmp_return;
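  // cvtqsi2ss treats its input as signed. If the top bit of src is set,
  // halve the value (folding the dropped low bit back in so rounding is
  // preserved), convert, and then double the result.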
877 testq(src, src);
878 j(sign, &msb_set_src, Label::kNear);
879 Cvtqsi2ss(dst, src);
880 jmp(&jmp_return, Label::kNear);
881 bind(&msb_set_src);
882 movq(tmp, src);
883 shrq(src, Immediate(1));
884 // Recover the least significant bit to avoid rounding errors.
885 andq(tmp, Immediate(1));
886 orq(src, tmp);
887 Cvtqsi2ss(dst, src);
888 addss(dst, dst);
889 bind(&jmp_return);
890 }
891
892
893 void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
894 Label msb_set_src;
895 Label jmp_return;
896 testq(src, src);
897 j(sign, &msb_set_src, Label::kNear);
898 Cvtqsi2sd(dst, src);
899 jmp(&jmp_return, Label::kNear);
900 bind(&msb_set_src);
901 movq(tmp, src);
902 shrq(src, Immediate(1));
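  // Recover the least significant bit to avoid rounding errors.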
903 andq(tmp, Immediate(1));
904 orq(src, tmp);
905 Cvtqsi2sd(dst, src);
906 addsd(dst, dst);
907 bind(&jmp_return);
908 }
909
910
911 void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
912 if (CpuFeatures::IsSupported(AVX)) {
913 CpuFeatureScope scope(this, AVX);
914 vcvtsd2si(dst, src);
915 } else {
916 cvtsd2si(dst, src);
917 }
918 }
919
920
921 void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
922 if (CpuFeatures::IsSupported(AVX)) {
923 CpuFeatureScope scope(this, AVX);
924 vcvttsd2si(dst, src);
925 } else {
926 cvttsd2si(dst, src);
927 }
928 }
929
930
931 void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
932 if (CpuFeatures::IsSupported(AVX)) {
933 CpuFeatureScope scope(this, AVX);
934 vcvttsd2si(dst, src);
935 } else {
936 cvttsd2si(dst, src);
937 }
938 }
939
940
941 void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
942 if (CpuFeatures::IsSupported(AVX)) {
943 CpuFeatureScope scope(this, AVX);
944 vcvttss2siq(dst, src);
945 } else {
946 cvttss2siq(dst, src);
947 }
948 }
949
950
951 void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
952 if (CpuFeatures::IsSupported(AVX)) {
953 CpuFeatureScope scope(this, AVX);
954 vcvttss2siq(dst, src);
955 } else {
956 cvttss2siq(dst, src);
957 }
958 }
959
960
961 void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
962 if (CpuFeatures::IsSupported(AVX)) {
963 CpuFeatureScope scope(this, AVX);
964 vcvttsd2siq(dst, src);
965 } else {
966 cvttsd2siq(dst, src);
967 }
968 }
969
970
971 void MacroAssembler::Cvttsd2siq(Register dst, const Operand& src) {
972 if (CpuFeatures::IsSupported(AVX)) {
973 CpuFeatureScope scope(this, AVX);
974 vcvttsd2siq(dst, src);
975 } else {
976 cvttsd2siq(dst, src);
977 }
978 }
979
980
981 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
982 DCHECK(!r.IsDouble());
983 if (r.IsInteger8()) {
984 movsxbq(dst, src);
985 } else if (r.IsUInteger8()) {
986 movzxbl(dst, src);
987 } else if (r.IsInteger16()) {
988 movsxwq(dst, src);
989 } else if (r.IsUInteger16()) {
990 movzxwl(dst, src);
991 } else if (r.IsInteger32()) {
992 movl(dst, src);
993 } else {
994 movp(dst, src);
995 }
996 }
997
998
999 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
1000 DCHECK(!r.IsDouble());
1001 if (r.IsInteger8() || r.IsUInteger8()) {
1002 movb(dst, src);
1003 } else if (r.IsInteger16() || r.IsUInteger16()) {
1004 movw(dst, src);
1005 } else if (r.IsInteger32()) {
1006 movl(dst, src);
1007 } else {
1008 if (r.IsHeapObject()) {
1009 AssertNotSmi(src);
1010 } else if (r.IsSmi()) {
1011 AssertSmi(src);
1012 }
1013 movp(dst, src);
1014 }
1015 }
1016
1017
1018 void MacroAssembler::Set(Register dst, int64_t x) {
1019 if (x == 0) {
1020 xorl(dst, dst);
1021 } else if (is_uint32(x)) {
1022 movl(dst, Immediate(static_cast<uint32_t>(x)));
1023 } else if (is_int32(x)) {
1024 movq(dst, Immediate(static_cast<int32_t>(x)));
1025 } else {
1026 movq(dst, x);
1027 }
1028 }
1029
1030
1031 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
1032 if (kPointerSize == kInt64Size) {
1033 if (is_int32(x)) {
1034 movp(dst, Immediate(static_cast<int32_t>(x)));
1035 } else {
1036 Set(kScratchRegister, x);
1037 movp(dst, kScratchRegister);
1038 }
1039 } else {
1040 movp(dst, Immediate(static_cast<int32_t>(x)));
1041 }
1042 }
1043
1044
1045 // ----------------------------------------------------------------------------
1046 // Smi tagging, untagging and tag detection.
1047
1048 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
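  // Immediates wider than kMaxBits are obfuscated with the JIT cookie before
  // being embedded in code (see SafeMove and SafePush below).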
1049 static const int kMaxBits = 17;
1050 return !is_intn(x, kMaxBits);
1051 }
1052
1053
1054 void MacroAssembler::SafeMove(Register dst, Smi* src) {
1055 DCHECK(!dst.is(kScratchRegister));
1056 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1057 if (SmiValuesAre32Bits()) {
1058 // JIT cookie can be converted to Smi.
1059 Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
1060 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1061 xorp(dst, kScratchRegister);
1062 } else {
1063 DCHECK(SmiValuesAre31Bits());
1064 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1065 movp(dst, Immediate(value ^ jit_cookie()));
1066 xorp(dst, Immediate(jit_cookie()));
1067 }
1068 } else {
1069 Move(dst, src);
1070 }
1071 }
1072
1073
1074 void MacroAssembler::SafePush(Smi* src) {
1075 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1076 if (SmiValuesAre32Bits()) {
1077 // JIT cookie can be converted to Smi.
1078 Push(Smi::FromInt(src->value() ^ jit_cookie()));
1079 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1080 xorp(Operand(rsp, 0), kScratchRegister);
1081 } else {
1082 DCHECK(SmiValuesAre31Bits());
1083 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1084 Push(Immediate(value ^ jit_cookie()));
1085 xorp(Operand(rsp, 0), Immediate(jit_cookie()));
1086 }
1087 } else {
1088 Push(src);
1089 }
1090 }
1091
1092
1093 Register MacroAssembler::GetSmiConstant(Smi* source) {
1094 STATIC_ASSERT(kSmiTag == 0);
1095 int value = source->value();
1096 if (value == 0) {
1097 xorl(kScratchRegister, kScratchRegister);
1098 return kScratchRegister;
1099 }
1100 LoadSmiConstant(kScratchRegister, source);
1101 return kScratchRegister;
1102 }
1103
1104
1105 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
1106 STATIC_ASSERT(kSmiTag == 0);
1107 int value = source->value();
1108 if (value == 0) {
1109 xorl(dst, dst);
1110 } else {
1111 Move(dst, source, Assembler::RelocInfoNone());
1112 }
1113 }
1114
1115
1116 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
1117 STATIC_ASSERT(kSmiTag == 0);
1118 if (!dst.is(src)) {
1119 movl(dst, src);
1120 }
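  // Tag by shifting the value up by kSmiShift (32 when smi payloads are
  // 32 bits wide, 1 when they are 31 bits wide).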
1121 shlp(dst, Immediate(kSmiShift));
1122 }
1123
1124
1125 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
1126 if (emit_debug_code()) {
1127 testb(dst, Immediate(0x01));
1128 Label ok;
1129 j(zero, &ok, Label::kNear);
1130 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
1131 bind(&ok);
1132 }
1133
1134 if (SmiValuesAre32Bits()) {
1135 DCHECK(kSmiShift % kBitsPerByte == 0);
1136 movl(Operand(dst, kSmiShift / kBitsPerByte), src);
1137 } else {
1138 DCHECK(SmiValuesAre31Bits());
1139 Integer32ToSmi(kScratchRegister, src);
1140 movp(dst, kScratchRegister);
1141 }
1142 }
1143
1144
1145 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
1146 Register src,
1147 int constant) {
1148 if (dst.is(src)) {
1149 addl(dst, Immediate(constant));
1150 } else {
1151 leal(dst, Operand(src, constant));
1152 }
1153 shlp(dst, Immediate(kSmiShift));
1154 }
1155
1156
1157 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1158 STATIC_ASSERT(kSmiTag == 0);
1159 if (!dst.is(src)) {
1160 movp(dst, src);
1161 }
1162
1163 if (SmiValuesAre32Bits()) {
1164 shrp(dst, Immediate(kSmiShift));
1165 } else {
1166 DCHECK(SmiValuesAre31Bits());
1167 sarl(dst, Immediate(kSmiShift));
1168 }
1169 }
1170
1171
1172 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1173 if (SmiValuesAre32Bits()) {
1174 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1175 } else {
1176 DCHECK(SmiValuesAre31Bits());
1177 movl(dst, src);
1178 sarl(dst, Immediate(kSmiShift));
1179 }
1180 }
1181
1182
1183 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1184 STATIC_ASSERT(kSmiTag == 0);
1185 if (!dst.is(src)) {
1186 movp(dst, src);
1187 }
1188 sarp(dst, Immediate(kSmiShift));
1189 if (kPointerSize == kInt32Size) {
1190 // Sign extend to 64-bit.
1191 movsxlq(dst, dst);
1192 }
1193 }
1194
1195
1196 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1197 if (SmiValuesAre32Bits()) {
1198 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1199 } else {
1200 DCHECK(SmiValuesAre31Bits());
1201 movp(dst, src);
1202 SmiToInteger64(dst, dst);
1203 }
1204 }
1205
1206
1207 void MacroAssembler::SmiTest(Register src) {
1208 AssertSmi(src);
1209 testp(src, src);
1210 }
1211
1212
1213 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1214 AssertSmi(smi1);
1215 AssertSmi(smi2);
1216 cmpp(smi1, smi2);
1217 }
1218
1219
1220 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1221 AssertSmi(dst);
1222 Cmp(dst, src);
1223 }
1224
1225
1226 void MacroAssembler::Cmp(Register dst, Smi* src) {
1227 DCHECK(!dst.is(kScratchRegister));
1228 if (src->value() == 0) {
1229 testp(dst, dst);
1230 } else {
1231 Register constant_reg = GetSmiConstant(src);
1232 cmpp(dst, constant_reg);
1233 }
1234 }
1235
1236
1237 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1238 AssertSmi(dst);
1239 AssertSmi(src);
1240 cmpp(dst, src);
1241 }
1242
1243
1244 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1245 AssertSmi(dst);
1246 AssertSmi(src);
1247 cmpp(dst, src);
1248 }
1249
1250
1251 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1252 AssertSmi(dst);
1253 if (SmiValuesAre32Bits()) {
1254 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1255 } else {
1256 DCHECK(SmiValuesAre31Bits());
1257 cmpl(dst, Immediate(src));
1258 }
1259 }
1260
1261
1262 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1263 // The Operand cannot use the smi register.
1264 Register smi_reg = GetSmiConstant(src);
1265 DCHECK(!dst.AddressUsesRegister(smi_reg));
1266 cmpp(dst, smi_reg);
1267 }
1268
1269
1270 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1271 if (SmiValuesAre32Bits()) {
1272 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1273 } else {
1274 DCHECK(SmiValuesAre31Bits());
1275 SmiToInteger32(kScratchRegister, dst);
1276 cmpl(kScratchRegister, src);
1277 }
1278 }
1279
1280
1281 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1282 Register src,
1283 int power) {
1284 DCHECK(power >= 0);
1285 DCHECK(power < 64);
1286 if (power == 0) {
1287 SmiToInteger64(dst, src);
1288 return;
1289 }
1290 if (!dst.is(src)) {
1291 movp(dst, src);
1292 }
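  // Fold the untagging shift (right by kSmiShift) and the multiplication by
  // 2^power into a single shift in whichever direction the net amount goes.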
1293 if (power < kSmiShift) {
1294 sarp(dst, Immediate(kSmiShift - power));
1295 } else if (power > kSmiShift) {
1296 shlp(dst, Immediate(power - kSmiShift));
1297 }
1298 }
1299
1300
1301 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1302 Register src,
1303 int power) {
1304 DCHECK((0 <= power) && (power < 32));
1305 if (dst.is(src)) {
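    // A single logical shift untags the smi and divides by 2^power; this is
    // only valid for non-negative smis.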
1306 shrp(dst, Immediate(power + kSmiShift));
1307 } else {
1308 UNIMPLEMENTED(); // Not used.
1309 }
1310 }
1311
1312
1313 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1314 Label* on_not_smis,
1315 Label::Distance near_jump) {
1316 if (dst.is(src1) || dst.is(src2)) {
1317 DCHECK(!src1.is(kScratchRegister));
1318 DCHECK(!src2.is(kScratchRegister));
1319 movp(kScratchRegister, src1);
1320 orp(kScratchRegister, src2);
1321 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1322 movp(dst, kScratchRegister);
1323 } else {
1324 movp(dst, src1);
1325 orp(dst, src2);
1326 JumpIfNotSmi(dst, on_not_smis, near_jump);
1327 }
1328 }
1329
1330
1331 Condition MacroAssembler::CheckSmi(Register src) {
1332 STATIC_ASSERT(kSmiTag == 0);
1333 testb(src, Immediate(kSmiTagMask));
1334 return zero;
1335 }
1336
1337
1338 Condition MacroAssembler::CheckSmi(const Operand& src) {
1339 STATIC_ASSERT(kSmiTag == 0);
1340 testb(src, Immediate(kSmiTagMask));
1341 return zero;
1342 }
1343
1344
1345 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1346 STATIC_ASSERT(kSmiTag == 0);
1347 // Test that both bits of the mask 0x8000000000000001 are zero.
1348 movp(kScratchRegister, src);
1349 rolp(kScratchRegister, Immediate(1));
1350 testb(kScratchRegister, Immediate(3));
1351 return zero;
1352 }
1353
1354
1355 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1356 if (first.is(second)) {
1357 return CheckSmi(first);
1358 }
1359 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1360 if (SmiValuesAre32Bits()) {
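    // The low two bits of the sum are zero only if both operands carry a
    // zero tag, i.e. only if both are smis.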
1361 leal(kScratchRegister, Operand(first, second, times_1, 0));
1362 testb(kScratchRegister, Immediate(0x03));
1363 } else {
1364 DCHECK(SmiValuesAre31Bits());
1365 movl(kScratchRegister, first);
1366 orl(kScratchRegister, second);
1367 testb(kScratchRegister, Immediate(kSmiTagMask));
1368 }
1369 return zero;
1370 }
1371
1372
1373 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1374 Register second) {
1375 if (first.is(second)) {
1376 return CheckNonNegativeSmi(first);
1377 }
1378 movp(kScratchRegister, first);
1379 orp(kScratchRegister, second);
1380 rolp(kScratchRegister, Immediate(1));
1381 testl(kScratchRegister, Immediate(3));
1382 return zero;
1383 }
1384
1385
1386 Condition MacroAssembler::CheckEitherSmi(Register first,
1387 Register second,
1388 Register scratch) {
1389 if (first.is(second)) {
1390 return CheckSmi(first);
1391 }
1392 if (scratch.is(second)) {
1393 andl(scratch, first);
1394 } else {
1395 if (!scratch.is(first)) {
1396 movl(scratch, first);
1397 }
1398 andl(scratch, second);
1399 }
1400 testb(scratch, Immediate(kSmiTagMask));
1401 return zero;
1402 }
1403
1404
1405 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1406 if (SmiValuesAre32Bits()) {
1407 // A 32-bit integer value can always be converted to a smi.
1408 return always;
1409 } else {
1410 DCHECK(SmiValuesAre31Bits());
1411 cmpl(src, Immediate(0xc0000000));
1412 return positive;
1413 }
1414 }
1415
1416
1417 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1418 if (SmiValuesAre32Bits()) {
1419 // An unsigned 32-bit integer value is valid as long as the high bit
1420 // is not set.
1421 testl(src, src);
1422 return positive;
1423 } else {
1424 DCHECK(SmiValuesAre31Bits());
1425 testl(src, Immediate(0xc0000000));
1426 return zero;
1427 }
1428 }
1429
1430
1431 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1432 if (dst.is(src)) {
1433 andl(dst, Immediate(kSmiTagMask));
1434 } else {
1435 movl(dst, Immediate(kSmiTagMask));
1436 andl(dst, src);
1437 }
1438 }
1439
1440
1441 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1442 if (!(src.AddressUsesRegister(dst))) {
1443 movl(dst, Immediate(kSmiTagMask));
1444 andl(dst, src);
1445 } else {
1446 movl(dst, src);
1447 andl(dst, Immediate(kSmiTagMask));
1448 }
1449 }
1450
1451
1452 void MacroAssembler::JumpIfValidSmiValue(Register src,
1453 Label* on_valid,
1454 Label::Distance near_jump) {
1455 Condition is_valid = CheckInteger32ValidSmiValue(src);
1456 j(is_valid, on_valid, near_jump);
1457 }
1458
1459
1460 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1461 Label* on_invalid,
1462 Label::Distance near_jump) {
1463 Condition is_valid = CheckInteger32ValidSmiValue(src);
1464 j(NegateCondition(is_valid), on_invalid, near_jump);
1465 }
1466
1467
1468 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1469 Label* on_valid,
1470 Label::Distance near_jump) {
1471 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1472 j(is_valid, on_valid, near_jump);
1473 }
1474
1475
1476 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1477 Label* on_invalid,
1478 Label::Distance near_jump) {
1479 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1480 j(NegateCondition(is_valid), on_invalid, near_jump);
1481 }
1482
1483
1484 void MacroAssembler::JumpIfSmi(Register src,
1485 Label* on_smi,
1486 Label::Distance near_jump) {
1487 Condition smi = CheckSmi(src);
1488 j(smi, on_smi, near_jump);
1489 }
1490
1491
1492 void MacroAssembler::JumpIfNotSmi(Register src,
1493 Label* on_not_smi,
1494 Label::Distance near_jump) {
1495 Condition smi = CheckSmi(src);
1496 j(NegateCondition(smi), on_not_smi, near_jump);
1497 }
1498
1499
1500 void MacroAssembler::JumpUnlessNonNegativeSmi(
1501 Register src, Label* on_not_smi_or_negative,
1502 Label::Distance near_jump) {
1503 Condition non_negative_smi = CheckNonNegativeSmi(src);
1504 j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1505 }
1506
1507
1508 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1509 Smi* constant,
1510 Label* on_equals,
1511 Label::Distance near_jump) {
1512 SmiCompare(src, constant);
1513 j(equal, on_equals, near_jump);
1514 }
1515
1516
1517 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1518 Register src2,
1519 Label* on_not_both_smi,
1520 Label::Distance near_jump) {
1521 Condition both_smi = CheckBothSmi(src1, src2);
1522 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1523 }
1524
1525
1526 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1527 Register src2,
1528 Label* on_not_both_smi,
1529 Label::Distance near_jump) {
1530 Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1531 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1532 }
1533
1534
1535 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1536 if (constant->value() == 0) {
1537 if (!dst.is(src)) {
1538 movp(dst, src);
1539 }
1540 return;
1541 } else if (dst.is(src)) {
1542 DCHECK(!dst.is(kScratchRegister));
1543 Register constant_reg = GetSmiConstant(constant);
1544 addp(dst, constant_reg);
1545 } else {
1546 LoadSmiConstant(dst, constant);
1547 addp(dst, src);
1548 }
1549 }
1550
1551
1552 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1553 if (constant->value() != 0) {
1554 if (SmiValuesAre32Bits()) {
1555 addl(Operand(dst, kSmiShift / kBitsPerByte),
1556 Immediate(constant->value()));
1557 } else {
1558 DCHECK(SmiValuesAre31Bits());
1559 addp(dst, Immediate(constant));
1560 }
1561 }
1562 }
1563
1564
1565 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
1566 SmiOperationConstraints constraints,
1567 Label* bailout_label,
1568 Label::Distance near_jump) {
1569 if (constant->value() == 0) {
1570 if (!dst.is(src)) {
1571 movp(dst, src);
1572 }
1573 } else if (dst.is(src)) {
1574 DCHECK(!dst.is(kScratchRegister));
1575 LoadSmiConstant(kScratchRegister, constant);
1576 addp(dst, kScratchRegister);
1577 if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1578 j(no_overflow, bailout_label, near_jump);
1579 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1580 subp(dst, kScratchRegister);
1581 } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1582 if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1583 Label done;
1584 j(no_overflow, &done, Label::kNear);
1585 subp(dst, kScratchRegister);
1586 jmp(bailout_label, near_jump);
1587 bind(&done);
1588 } else {
1589 // Bail out on overflow without preserving src.
1590 j(overflow, bailout_label, near_jump);
1591 }
1592 } else {
1593 UNREACHABLE();
1594 }
1595 } else {
1596 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1597 DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1598 LoadSmiConstant(dst, constant);
1599 addp(dst, src);
1600 j(overflow, bailout_label, near_jump);
1601 }
1602 }
1603
1604
1605 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1606 if (constant->value() == 0) {
1607 if (!dst.is(src)) {
1608 movp(dst, src);
1609 }
1610 } else if (dst.is(src)) {
1611 DCHECK(!dst.is(kScratchRegister));
1612 Register constant_reg = GetSmiConstant(constant);
1613 subp(dst, constant_reg);
1614 } else {
1615 if (constant->value() == Smi::kMinValue) {
1616 LoadSmiConstant(dst, constant);
1617 // Adding and subtracting the min-value gives the same result; they only
1618 // differ in the overflow flag, which we don't check here.
1619 addp(dst, src);
1620 } else {
1621 // Subtract by adding the negation.
1622 LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1623 addp(dst, src);
1624 }
1625 }
1626 }
1627
1628
1629 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
1630 SmiOperationConstraints constraints,
1631 Label* bailout_label,
1632 Label::Distance near_jump) {
1633 if (constant->value() == 0) {
1634 if (!dst.is(src)) {
1635 movp(dst, src);
1636 }
1637 } else if (dst.is(src)) {
1638 DCHECK(!dst.is(kScratchRegister));
1639 LoadSmiConstant(kScratchRegister, constant);
1640 subp(dst, kScratchRegister);
1641 if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1642 j(no_overflow, bailout_label, near_jump);
1643 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1644 addp(dst, kScratchRegister);
1645 } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1646 if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1647 Label done;
1648 j(no_overflow, &done, Label::kNear);
1649 addp(dst, kScratchRegister);
1650 jmp(bailout_label, near_jump);
1651 bind(&done);
1652 } else {
1653 // Bail out on overflow without preserving src.
1654 j(overflow, bailout_label, near_jump);
1655 }
1656 } else {
1657 UNREACHABLE();
1658 }
1659 } else {
1660 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1661 DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1662 if (constant->value() == Smi::kMinValue) {
1663 DCHECK(!dst.is(kScratchRegister));
1664 movp(dst, src);
1665 LoadSmiConstant(kScratchRegister, constant);
1666 subp(dst, kScratchRegister);
1667 j(overflow, bailout_label, near_jump);
1668 } else {
1669 // Subtract by adding the negation.
1670 LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1671 addp(dst, src);
1672 j(overflow, bailout_label, near_jump);
1673 }
1674 }
1675 }
1676
1677
1678 void MacroAssembler::SmiNeg(Register dst,
1679 Register src,
1680 Label* on_smi_result,
1681 Label::Distance near_jump) {
1682 if (dst.is(src)) {
1683 DCHECK(!dst.is(kScratchRegister));
1684 movp(kScratchRegister, src);
1685 negp(dst); // Low 32 bits are retained as zero by negation.
1686 // Test if result is zero or Smi::kMinValue.
1687 cmpp(dst, kScratchRegister);
1688 j(not_equal, on_smi_result, near_jump);
1689 movp(src, kScratchRegister);
1690 } else {
1691 movp(dst, src);
1692 negp(dst);
1693 cmpp(dst, src);
1694 // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1695 j(not_equal, on_smi_result, near_jump);
1696 }
1697 }
1698
1699
1700 template<class T>
1701 static void SmiAddHelper(MacroAssembler* masm,
1702 Register dst,
1703 Register src1,
1704 T src2,
1705 Label* on_not_smi_result,
1706 Label::Distance near_jump) {
1707 if (dst.is(src1)) {
1708 Label done;
1709 masm->addp(dst, src2);
1710 masm->j(no_overflow, &done, Label::kNear);
1711 // Restore src1.
1712 masm->subp(dst, src2);
1713 masm->jmp(on_not_smi_result, near_jump);
1714 masm->bind(&done);
1715 } else {
1716 masm->movp(dst, src1);
1717 masm->addp(dst, src2);
1718 masm->j(overflow, on_not_smi_result, near_jump);
1719 }
1720 }
1721
1722
1723 void MacroAssembler::SmiAdd(Register dst,
1724 Register src1,
1725 Register src2,
1726 Label* on_not_smi_result,
1727 Label::Distance near_jump) {
1728 DCHECK_NOT_NULL(on_not_smi_result);
1729 DCHECK(!dst.is(src2));
1730 SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1731 }
1732
1733
SmiAdd(Register dst,Register src1,const Operand & src2,Label * on_not_smi_result,Label::Distance near_jump)1734 void MacroAssembler::SmiAdd(Register dst,
1735 Register src1,
1736 const Operand& src2,
1737 Label* on_not_smi_result,
1738 Label::Distance near_jump) {
1739 DCHECK_NOT_NULL(on_not_smi_result);
1740 DCHECK(!src2.AddressUsesRegister(dst));
1741 SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1742 }
1743
1744
SmiAdd(Register dst,Register src1,Register src2)1745 void MacroAssembler::SmiAdd(Register dst,
1746 Register src1,
1747 Register src2) {
1748 // No overflow checking. Use only when it's known that
1749 // overflowing is impossible.
1750 if (!dst.is(src1)) {
1751 if (emit_debug_code()) {
1752 movp(kScratchRegister, src1);
1753 addp(kScratchRegister, src2);
1754 Check(no_overflow, kSmiAdditionOverflow);
1755 }
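    // leap computes src1 + src2 directly into dst without clobbering either
    // source register (and, unlike addp, without affecting the flags).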
1756 leap(dst, Operand(src1, src2, times_1, 0));
1757 } else {
1758 addp(dst, src2);
1759 Assert(no_overflow, kSmiAdditionOverflow);
1760 }
1761 }
1762
1763
1764 template<class T>
SmiSubHelper(MacroAssembler * masm,Register dst,Register src1,T src2,Label * on_not_smi_result,Label::Distance near_jump)1765 static void SmiSubHelper(MacroAssembler* masm,
1766 Register dst,
1767 Register src1,
1768 T src2,
1769 Label* on_not_smi_result,
1770 Label::Distance near_jump) {
1771 if (dst.is(src1)) {
1772 Label done;
1773 masm->subp(dst, src2);
1774 masm->j(no_overflow, &done, Label::kNear);
1775 // Restore src1.
1776 masm->addp(dst, src2);
1777 masm->jmp(on_not_smi_result, near_jump);
1778 masm->bind(&done);
1779 } else {
1780 masm->movp(dst, src1);
1781 masm->subp(dst, src2);
1782 masm->j(overflow, on_not_smi_result, near_jump);
1783 }
1784 }
1785
1786
SmiSub(Register dst,Register src1,Register src2,Label * on_not_smi_result,Label::Distance near_jump)1787 void MacroAssembler::SmiSub(Register dst,
1788 Register src1,
1789 Register src2,
1790 Label* on_not_smi_result,
1791 Label::Distance near_jump) {
1792 DCHECK_NOT_NULL(on_not_smi_result);
1793 DCHECK(!dst.is(src2));
1794 SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1795 }
1796
1797
SmiSub(Register dst,Register src1,const Operand & src2,Label * on_not_smi_result,Label::Distance near_jump)1798 void MacroAssembler::SmiSub(Register dst,
1799 Register src1,
1800 const Operand& src2,
1801 Label* on_not_smi_result,
1802 Label::Distance near_jump) {
1803 DCHECK_NOT_NULL(on_not_smi_result);
1804 DCHECK(!src2.AddressUsesRegister(dst));
1805 SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1806 }
1807
1808
1809 template<class T>
SmiSubNoOverflowHelper(MacroAssembler * masm,Register dst,Register src1,T src2)1810 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1811 Register dst,
1812 Register src1,
1813 T src2) {
1814 // No overflow checking. Use only when it's known that
1815 // overflowing is impossible (e.g., subtracting two positive smis).
1816 if (!dst.is(src1)) {
1817 masm->movp(dst, src1);
1818 }
1819 masm->subp(dst, src2);
1820 masm->Assert(no_overflow, kSmiSubtractionOverflow);
1821 }
1822
1823
SmiSub(Register dst,Register src1,Register src2)1824 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1825 DCHECK(!dst.is(src2));
1826 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
1827 }
1828
1829
SmiSub(Register dst,Register src1,const Operand & src2)1830 void MacroAssembler::SmiSub(Register dst,
1831 Register src1,
1832 const Operand& src2) {
1833 SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
1834 }
1835
1836
SmiMul(Register dst,Register src1,Register src2,Label * on_not_smi_result,Label::Distance near_jump)1837 void MacroAssembler::SmiMul(Register dst,
1838 Register src1,
1839 Register src2,
1840 Label* on_not_smi_result,
1841 Label::Distance near_jump) {
1842 DCHECK(!dst.is(src2));
1843 DCHECK(!dst.is(kScratchRegister));
1844 DCHECK(!src1.is(kScratchRegister));
1845 DCHECK(!src2.is(kScratchRegister));
1846
1847 if (dst.is(src1)) {
1848 Label failure, zero_correct_result;
1849 movp(kScratchRegister, src1); // Create backup for later testing.
1850 SmiToInteger64(dst, src1);
1851 imulp(dst, src2);
1852 j(overflow, &failure, Label::kNear);
1853
1854 // Check for negative zero result. If product is zero, and one
1855 // argument is negative, go to slow case.
1856 Label correct_result;
1857 testp(dst, dst);
1858 j(not_zero, &correct_result, Label::kNear);
1859
1860 movp(dst, kScratchRegister);
1861 xorp(dst, src2);
1862 // Result was positive zero.
1863 j(positive, &zero_correct_result, Label::kNear);
1864
1865 bind(&failure); // Reused failure exit, restores src1.
1866 movp(src1, kScratchRegister);
1867 jmp(on_not_smi_result, near_jump);
1868
1869 bind(&zero_correct_result);
1870 Set(dst, 0);
1871
1872 bind(&correct_result);
1873 } else {
1874 SmiToInteger64(dst, src1);
1875 imulp(dst, src2);
1876 j(overflow, on_not_smi_result, near_jump);
1877 // Check for negative zero result. If product is zero, and one
1878 // argument is negative, go to slow case.
1879 Label correct_result;
1880 testp(dst, dst);
1881 j(not_zero, &correct_result, Label::kNear);
1882     // One of src1 and src2 is zero; check whether the other is
1883     // negative.
1884 movp(kScratchRegister, src1);
1885 xorp(kScratchRegister, src2);
1886 j(negative, on_not_smi_result, near_jump);
1887 bind(&correct_result);
1888 }
1889 }
1890
1891
SmiDiv(Register dst,Register src1,Register src2,Label * on_not_smi_result,Label::Distance near_jump)1892 void MacroAssembler::SmiDiv(Register dst,
1893 Register src1,
1894 Register src2,
1895 Label* on_not_smi_result,
1896 Label::Distance near_jump) {
1897 DCHECK(!src1.is(kScratchRegister));
1898 DCHECK(!src2.is(kScratchRegister));
1899 DCHECK(!dst.is(kScratchRegister));
1900 DCHECK(!src2.is(rax));
1901 DCHECK(!src2.is(rdx));
1902 DCHECK(!src1.is(rdx));
1903
1904 // Check for 0 divisor (result is +/-Infinity).
1905 testp(src2, src2);
1906 j(zero, on_not_smi_result, near_jump);
1907
1908 if (src1.is(rax)) {
1909 movp(kScratchRegister, src1);
1910 }
1911 SmiToInteger32(rax, src1);
1912 // We need to rule out dividing Smi::kMinValue by -1, since that would
1913 // overflow in idiv and raise an exception.
1914 // We combine this with negative zero test (negative zero only happens
1915 // when dividing zero by a negative number).
1916
1917 // We overshoot a little and go to slow case if we divide min-value
1918 // by any negative value, not just -1.
1919 Label safe_div;
1920 testl(rax, Immediate(~Smi::kMinValue));
1921 j(not_zero, &safe_div, Label::kNear);
1922 testp(src2, src2);
1923 if (src1.is(rax)) {
1924 j(positive, &safe_div, Label::kNear);
1925 movp(src1, kScratchRegister);
1926 jmp(on_not_smi_result, near_jump);
1927 } else {
1928 j(negative, on_not_smi_result, near_jump);
1929 }
1930 bind(&safe_div);
1931
1932 SmiToInteger32(src2, src2);
1933 // Sign extend src1 into edx:eax.
1934 cdq();
1935 idivl(src2);
1936 Integer32ToSmi(src2, src2);
1937 // Check that the remainder is zero.
1938 testl(rdx, rdx);
1939 if (src1.is(rax)) {
1940 Label smi_result;
1941 j(zero, &smi_result, Label::kNear);
1942 movp(src1, kScratchRegister);
1943 jmp(on_not_smi_result, near_jump);
1944 bind(&smi_result);
1945 } else {
1946 j(not_zero, on_not_smi_result, near_jump);
1947 }
1948 if (!dst.is(src1) && src1.is(rax)) {
1949 movp(src1, kScratchRegister);
1950 }
1951 Integer32ToSmi(dst, rax);
1952 }
1953
1954
SmiMod(Register dst,Register src1,Register src2,Label * on_not_smi_result,Label::Distance near_jump)1955 void MacroAssembler::SmiMod(Register dst,
1956 Register src1,
1957 Register src2,
1958 Label* on_not_smi_result,
1959 Label::Distance near_jump) {
1960 DCHECK(!dst.is(kScratchRegister));
1961 DCHECK(!src1.is(kScratchRegister));
1962 DCHECK(!src2.is(kScratchRegister));
1963 DCHECK(!src2.is(rax));
1964 DCHECK(!src2.is(rdx));
1965 DCHECK(!src1.is(rdx));
1966 DCHECK(!src1.is(src2));
1967
1968 testp(src2, src2);
1969 j(zero, on_not_smi_result, near_jump);
1970
1971 if (src1.is(rax)) {
1972 movp(kScratchRegister, src1);
1973 }
1974 SmiToInteger32(rax, src1);
1975 SmiToInteger32(src2, src2);
1976
1977 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1978 Label safe_div;
1979 cmpl(rax, Immediate(Smi::kMinValue));
1980 j(not_equal, &safe_div, Label::kNear);
1981 cmpl(src2, Immediate(-1));
1982 j(not_equal, &safe_div, Label::kNear);
1983 // Retag inputs and go slow case.
1984 Integer32ToSmi(src2, src2);
1985 if (src1.is(rax)) {
1986 movp(src1, kScratchRegister);
1987 }
1988 jmp(on_not_smi_result, near_jump);
1989 bind(&safe_div);
1990
1991 // Sign extend eax into edx:eax.
1992 cdq();
1993 idivl(src2);
1994 // Restore smi tags on inputs.
1995 Integer32ToSmi(src2, src2);
1996 if (src1.is(rax)) {
1997 movp(src1, kScratchRegister);
1998 }
1999 // Check for a negative zero result. If the result is zero, and the
2000 // dividend is negative, go slow to return a floating point negative zero.
2001 Label smi_result;
2002 testl(rdx, rdx);
2003 j(not_zero, &smi_result, Label::kNear);
2004 testp(src1, src1);
2005 j(negative, on_not_smi_result, near_jump);
2006 bind(&smi_result);
2007 Integer32ToSmi(dst, rdx);
2008 }
2009
2010
SmiNot(Register dst,Register src)2011 void MacroAssembler::SmiNot(Register dst, Register src) {
2012 DCHECK(!dst.is(kScratchRegister));
2013 DCHECK(!src.is(kScratchRegister));
2014 if (SmiValuesAre32Bits()) {
2015 // Set tag and padding bits before negating, so that they are zero
2016 // afterwards.
2017 movl(kScratchRegister, Immediate(~0));
2018 } else {
2019 DCHECK(SmiValuesAre31Bits());
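    // With 31-bit smis there are no padding bits, so only the tag bit needs
    // to be pre-set; it becomes zero again after the notp below.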
2020 movl(kScratchRegister, Immediate(1));
2021 }
2022 if (dst.is(src)) {
2023 xorp(dst, kScratchRegister);
2024 } else {
2025 leap(dst, Operand(src, kScratchRegister, times_1, 0));
2026 }
2027 notp(dst);
2028 }
2029
2030
SmiAnd(Register dst,Register src1,Register src2)2031 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
2032 DCHECK(!dst.is(src2));
2033 if (!dst.is(src1)) {
2034 movp(dst, src1);
2035 }
2036 andp(dst, src2);
2037 }
2038
2039
SmiAndConstant(Register dst,Register src,Smi * constant)2040 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
2041 if (constant->value() == 0) {
2042 Set(dst, 0);
2043 } else if (dst.is(src)) {
2044 DCHECK(!dst.is(kScratchRegister));
2045 Register constant_reg = GetSmiConstant(constant);
2046 andp(dst, constant_reg);
2047 } else {
2048 LoadSmiConstant(dst, constant);
2049 andp(dst, src);
2050 }
2051 }
2052
2053
SmiOr(Register dst,Register src1,Register src2)2054 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
2055 if (!dst.is(src1)) {
2056 DCHECK(!src1.is(src2));
2057 movp(dst, src1);
2058 }
2059 orp(dst, src2);
2060 }
2061
2062
SmiOrConstant(Register dst,Register src,Smi * constant)2063 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
2064 if (dst.is(src)) {
2065 DCHECK(!dst.is(kScratchRegister));
2066 Register constant_reg = GetSmiConstant(constant);
2067 orp(dst, constant_reg);
2068 } else {
2069 LoadSmiConstant(dst, constant);
2070 orp(dst, src);
2071 }
2072 }
2073
2074
SmiXor(Register dst,Register src1,Register src2)2075 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2076 if (!dst.is(src1)) {
2077 DCHECK(!src1.is(src2));
2078 movp(dst, src1);
2079 }
2080 xorp(dst, src2);
2081 }
2082
2083
SmiXorConstant(Register dst,Register src,Smi * constant)2084 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2085 if (dst.is(src)) {
2086 DCHECK(!dst.is(kScratchRegister));
2087 Register constant_reg = GetSmiConstant(constant);
2088 xorp(dst, constant_reg);
2089 } else {
2090 LoadSmiConstant(dst, constant);
2091 xorp(dst, src);
2092 }
2093 }
2094
2095
SmiShiftArithmeticRightConstant(Register dst,Register src,int shift_value)2096 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
2097 Register src,
2098 int shift_value) {
2099 DCHECK(is_uint5(shift_value));
2100 if (shift_value > 0) {
2101 if (dst.is(src)) {
2102 sarp(dst, Immediate(shift_value + kSmiShift));
2103 shlp(dst, Immediate(kSmiShift));
2104 } else {
2105 UNIMPLEMENTED(); // Not used.
2106 }
2107 }
2108 }
2109
2110
SmiShiftLeftConstant(Register dst,Register src,int shift_value,Label * on_not_smi_result,Label::Distance near_jump)2111 void MacroAssembler::SmiShiftLeftConstant(Register dst,
2112 Register src,
2113 int shift_value,
2114 Label* on_not_smi_result,
2115 Label::Distance near_jump) {
2116 if (SmiValuesAre32Bits()) {
2117 if (!dst.is(src)) {
2118 movp(dst, src);
2119 }
2120 if (shift_value > 0) {
2121       // Only the lower five bits of the shift amount are used, not six as shl allows.
2122 shlq(dst, Immediate(shift_value & 0x1f));
2123 }
2124 } else {
2125 DCHECK(SmiValuesAre31Bits());
2126 if (dst.is(src)) {
2127 UNIMPLEMENTED(); // Not used.
2128 } else {
2129 SmiToInteger32(dst, src);
2130 shll(dst, Immediate(shift_value));
2131 JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
2132 Integer32ToSmi(dst, dst);
2133 }
2134 }
2135 }
2136
2137
SmiShiftLogicalRightConstant(Register dst,Register src,int shift_value,Label * on_not_smi_result,Label::Distance near_jump)2138 void MacroAssembler::SmiShiftLogicalRightConstant(
2139 Register dst, Register src, int shift_value,
2140 Label* on_not_smi_result, Label::Distance near_jump) {
2141   // Logical right shift interprets its result as an *unsigned* number.
2142 if (dst.is(src)) {
2143 UNIMPLEMENTED(); // Not used.
2144 } else {
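    // Even a shift by zero must bail out for negative inputs, because the
    // unsigned result of shifting a negative smi by zero exceeds
    // Smi::kMaxValue and cannot be represented as a smi.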
2145 if (shift_value == 0) {
2146 testp(src, src);
2147 j(negative, on_not_smi_result, near_jump);
2148 }
2149 if (SmiValuesAre32Bits()) {
2150 movp(dst, src);
2151 shrp(dst, Immediate(shift_value + kSmiShift));
2152 shlp(dst, Immediate(kSmiShift));
2153 } else {
2154 DCHECK(SmiValuesAre31Bits());
2155 SmiToInteger32(dst, src);
2156 shrp(dst, Immediate(shift_value));
2157 JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
2158 Integer32ToSmi(dst, dst);
2159 }
2160 }
2161 }
2162
2163
SmiShiftLeft(Register dst,Register src1,Register src2,Label * on_not_smi_result,Label::Distance near_jump)2164 void MacroAssembler::SmiShiftLeft(Register dst,
2165 Register src1,
2166 Register src2,
2167 Label* on_not_smi_result,
2168 Label::Distance near_jump) {
2169 if (SmiValuesAre32Bits()) {
2170 DCHECK(!dst.is(rcx));
2171 if (!dst.is(src1)) {
2172 movp(dst, src1);
2173 }
2174 // Untag shift amount.
2175 SmiToInteger32(rcx, src2);
2176     // Only the lower five bits of the shift amount are used, not six as shl allows.
2177 andp(rcx, Immediate(0x1f));
2178 shlq_cl(dst);
2179 } else {
2180 DCHECK(SmiValuesAre31Bits());
2181 DCHECK(!dst.is(kScratchRegister));
2182 DCHECK(!src1.is(kScratchRegister));
2183 DCHECK(!src2.is(kScratchRegister));
2184 DCHECK(!dst.is(src2));
2185 DCHECK(!dst.is(rcx));
2186
2187 if (src1.is(rcx) || src2.is(rcx)) {
2188 movq(kScratchRegister, rcx);
2189 }
2190 if (dst.is(src1)) {
2191 UNIMPLEMENTED(); // Not used.
2192 } else {
2193 Label valid_result;
2194 SmiToInteger32(dst, src1);
2195 SmiToInteger32(rcx, src2);
2196 shll_cl(dst);
2197 JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
2198       // Since neither src1 nor src2 can be dst, we do not need to restore
2199       // them after clobbering dst.
2200 if (src1.is(rcx) || src2.is(rcx)) {
2201 if (src1.is(rcx)) {
2202 movq(src1, kScratchRegister);
2203 } else {
2204 movq(src2, kScratchRegister);
2205 }
2206 }
2207 jmp(on_not_smi_result, near_jump);
2208 bind(&valid_result);
2209 Integer32ToSmi(dst, dst);
2210 }
2211 }
2212 }
2213
2214
SmiShiftLogicalRight(Register dst,Register src1,Register src2,Label * on_not_smi_result,Label::Distance near_jump)2215 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2216 Register src1,
2217 Register src2,
2218 Label* on_not_smi_result,
2219 Label::Distance near_jump) {
2220 DCHECK(!dst.is(kScratchRegister));
2221 DCHECK(!src1.is(kScratchRegister));
2222 DCHECK(!src2.is(kScratchRegister));
2223 DCHECK(!dst.is(src2));
2224 DCHECK(!dst.is(rcx));
2225 if (src1.is(rcx) || src2.is(rcx)) {
2226 movq(kScratchRegister, rcx);
2227 }
2228 if (dst.is(src1)) {
2229 UNIMPLEMENTED(); // Not used.
2230 } else {
2231 Label valid_result;
2232 SmiToInteger32(dst, src1);
2233 SmiToInteger32(rcx, src2);
2234 shrl_cl(dst);
2235 JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
2236     // Since neither src1 nor src2 can be dst, we do not need to restore
2237     // them after clobbering dst.
2238 if (src1.is(rcx) || src2.is(rcx)) {
2239 if (src1.is(rcx)) {
2240 movq(src1, kScratchRegister);
2241 } else {
2242 movq(src2, kScratchRegister);
2243 }
2244 }
2245 jmp(on_not_smi_result, near_jump);
2246 bind(&valid_result);
2247 Integer32ToSmi(dst, dst);
2248 }
2249 }
2250
2251
SmiShiftArithmeticRight(Register dst,Register src1,Register src2)2252 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2253 Register src1,
2254 Register src2) {
2255 DCHECK(!dst.is(kScratchRegister));
2256 DCHECK(!src1.is(kScratchRegister));
2257 DCHECK(!src2.is(kScratchRegister));
2258 DCHECK(!dst.is(rcx));
2259
2260 SmiToInteger32(rcx, src2);
2261 if (!dst.is(src1)) {
2262 movp(dst, src1);
2263 }
2264 SmiToInteger32(dst, dst);
2265 sarl_cl(dst);
2266 Integer32ToSmi(dst, dst);
2267 }
2268
2269
SelectNonSmi(Register dst,Register src1,Register src2,Label * on_not_smis,Label::Distance near_jump)2270 void MacroAssembler::SelectNonSmi(Register dst,
2271 Register src1,
2272 Register src2,
2273 Label* on_not_smis,
2274 Label::Distance near_jump) {
2275 DCHECK(!dst.is(kScratchRegister));
2276 DCHECK(!src1.is(kScratchRegister));
2277 DCHECK(!src2.is(kScratchRegister));
2278 DCHECK(!dst.is(src1));
2279 DCHECK(!dst.is(src2));
2280 // Both operands must not be smis.
2281 #ifdef DEBUG
2282 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2283 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2284 #endif
2285 STATIC_ASSERT(kSmiTag == 0);
2286 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
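  // With kSmiTag == 0 a smi has a clear tag bit and a heap object has it set,
  // so and-ing and testing the tag bits is non-zero exactly when neither
  // operand is a smi.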
2287 movl(kScratchRegister, Immediate(kSmiTagMask));
2288 andp(kScratchRegister, src1);
2289 testl(kScratchRegister, src2);
2290   // If non-zero, neither operand is a smi.
2291 j(not_zero, on_not_smis, near_jump);
2292
2293 // Exactly one operand is a smi.
2294 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2295 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
2296 subp(kScratchRegister, Immediate(1));
2297   // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
2298 movp(dst, src1);
2299 xorp(dst, src2);
2300 andp(dst, kScratchRegister);
2301 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2302 xorp(dst, src1);
2303 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2304 }
2305
2306
SmiToIndex(Register dst,Register src,int shift)2307 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2308 Register src,
2309 int shift) {
2310 if (SmiValuesAre32Bits()) {
2311 DCHECK(is_uint6(shift));
2312 // There is a possible optimization if shift is in the range 60-63, but that
2313 // will (and must) never happen.
2314 if (!dst.is(src)) {
2315 movp(dst, src);
2316 }
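    // The smi payload is stored shifted left by kSmiShift; adjusting that
    // shift to the requested scale yields the untagged value times 2^shift.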
2317 if (shift < kSmiShift) {
2318 sarp(dst, Immediate(kSmiShift - shift));
2319 } else {
2320 shlp(dst, Immediate(shift - kSmiShift));
2321 }
2322 return SmiIndex(dst, times_1);
2323 } else {
2324 DCHECK(SmiValuesAre31Bits());
2325 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2326 if (!dst.is(src)) {
2327 movp(dst, src);
2328 }
2329 // We have to sign extend the index register to 64-bit as the SMI might
2330 // be negative.
2331 movsxlq(dst, dst);
2332 if (shift == times_1) {
2333 sarq(dst, Immediate(kSmiShift));
2334 return SmiIndex(dst, times_1);
2335 }
2336 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2337 }
2338 }
2339
2340
SmiToNegativeIndex(Register dst,Register src,int shift)2341 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2342 Register src,
2343 int shift) {
2344 if (SmiValuesAre32Bits()) {
2345 // Register src holds a positive smi.
2346 DCHECK(is_uint6(shift));
2347 if (!dst.is(src)) {
2348 movp(dst, src);
2349 }
2350 negp(dst);
2351 if (shift < kSmiShift) {
2352 sarp(dst, Immediate(kSmiShift - shift));
2353 } else {
2354 shlp(dst, Immediate(shift - kSmiShift));
2355 }
2356 return SmiIndex(dst, times_1);
2357 } else {
2358 DCHECK(SmiValuesAre31Bits());
2359 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2360 if (!dst.is(src)) {
2361 movp(dst, src);
2362 }
2363 negq(dst);
2364 if (shift == times_1) {
2365 sarq(dst, Immediate(kSmiShift));
2366 return SmiIndex(dst, times_1);
2367 }
2368 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2369 }
2370 }
2371
2372
AddSmiField(Register dst,const Operand & src)2373 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2374 if (SmiValuesAre32Bits()) {
2375 DCHECK_EQ(0, kSmiShift % kBitsPerByte);
2376 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2377 } else {
2378 DCHECK(SmiValuesAre31Bits());
2379 SmiToInteger32(kScratchRegister, src);
2380 addl(dst, kScratchRegister);
2381 }
2382 }
2383
2384
Push(Smi * source)2385 void MacroAssembler::Push(Smi* source) {
2386 intptr_t smi = reinterpret_cast<intptr_t>(source);
2387 if (is_int32(smi)) {
2388 Push(Immediate(static_cast<int32_t>(smi)));
2389 } else {
2390 Register constant = GetSmiConstant(source);
2391 Push(constant);
2392 }
2393 }
2394
2395
PushRegisterAsTwoSmis(Register src,Register scratch)2396 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2397 DCHECK(!src.is(scratch));
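  // Split the 64-bit value into two halves and push each one tagged as a smi,
  // presumably so that the raw bits never appear on the stack in a form that
  // could be mistaken for a heap pointer; PopRegisterAsTwoSmis reverses this.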
2398 movp(scratch, src);
2399 // High bits.
2400 shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2401 shlp(src, Immediate(kSmiShift));
2402 Push(src);
2403 // Low bits.
2404 shlp(scratch, Immediate(kSmiShift));
2405 Push(scratch);
2406 }
2407
2408
PopRegisterAsTwoSmis(Register dst,Register scratch)2409 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2410 DCHECK(!dst.is(scratch));
2411 Pop(scratch);
2412 // Low bits.
2413 shrp(scratch, Immediate(kSmiShift));
2414 Pop(dst);
2415 shrp(dst, Immediate(kSmiShift));
2416 // High bits.
2417 shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2418 orp(dst, scratch);
2419 }
2420
2421
Test(const Operand & src,Smi * source)2422 void MacroAssembler::Test(const Operand& src, Smi* source) {
2423 if (SmiValuesAre32Bits()) {
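    // With 32-bit smis the payload lives in the upper half of the field, so
    // test only that half by offsetting the operand by kIntSize bytes.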
2424 testl(Operand(src, kIntSize), Immediate(source->value()));
2425 } else {
2426 DCHECK(SmiValuesAre31Bits());
2427 testl(src, Immediate(source));
2428 }
2429 }
2430
2431
2432 // ----------------------------------------------------------------------------
2433
2434
JumpIfNotString(Register object,Register object_map,Label * not_string,Label::Distance near_jump)2435 void MacroAssembler::JumpIfNotString(Register object,
2436 Register object_map,
2437 Label* not_string,
2438 Label::Distance near_jump) {
2439 Condition is_smi = CheckSmi(object);
2440 j(is_smi, not_string, near_jump);
2441 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2442 j(above_equal, not_string, near_jump);
2443 }
2444
2445
JumpIfNotBothSequentialOneByteStrings(Register first_object,Register second_object,Register scratch1,Register scratch2,Label * on_fail,Label::Distance near_jump)2446 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
2447 Register first_object, Register second_object, Register scratch1,
2448 Register scratch2, Label* on_fail, Label::Distance near_jump) {
2449   // Check that neither object is a smi.
2450 Condition either_smi = CheckEitherSmi(first_object, second_object);
2451 j(either_smi, on_fail, near_jump);
2452
2453 // Load instance type for both strings.
2454 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2455 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2456 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2457 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2458
2459 // Check that both are flat one-byte strings.
2460 DCHECK(kNotStringTag != 0);
2461 const int kFlatOneByteStringMask =
2462 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2463 const int kFlatOneByteStringTag =
2464 kStringTag | kOneByteStringTag | kSeqStringTag;
2465
2466 andl(scratch1, Immediate(kFlatOneByteStringMask));
2467 andl(scratch2, Immediate(kFlatOneByteStringMask));
2468 // Interleave the bits to check both scratch1 and scratch2 in one test.
2469 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2470 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2471 cmpl(scratch1,
2472 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2473 j(not_equal, on_fail, near_jump);
2474 }
2475
2476
JumpIfInstanceTypeIsNotSequentialOneByte(Register instance_type,Register scratch,Label * failure,Label::Distance near_jump)2477 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2478 Register instance_type, Register scratch, Label* failure,
2479 Label::Distance near_jump) {
2480 if (!scratch.is(instance_type)) {
2481 movl(scratch, instance_type);
2482 }
2483
2484 const int kFlatOneByteStringMask =
2485 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2486
2487 andl(scratch, Immediate(kFlatOneByteStringMask));
2488 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2489 j(not_equal, failure, near_jump);
2490 }
2491
2492
JumpIfBothInstanceTypesAreNotSequentialOneByte(Register first_object_instance_type,Register second_object_instance_type,Register scratch1,Register scratch2,Label * on_fail,Label::Distance near_jump)2493 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2494 Register first_object_instance_type, Register second_object_instance_type,
2495 Register scratch1, Register scratch2, Label* on_fail,
2496 Label::Distance near_jump) {
2497 // Load instance type for both strings.
2498 movp(scratch1, first_object_instance_type);
2499 movp(scratch2, second_object_instance_type);
2500
2501 // Check that both are flat one-byte strings.
2502 DCHECK(kNotStringTag != 0);
2503 const int kFlatOneByteStringMask =
2504 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2505 const int kFlatOneByteStringTag =
2506 kStringTag | kOneByteStringTag | kSeqStringTag;
2507
2508 andl(scratch1, Immediate(kFlatOneByteStringMask));
2509 andl(scratch2, Immediate(kFlatOneByteStringMask));
2510 // Interleave the bits to check both scratch1 and scratch2 in one test.
2511 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2512 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2513 cmpl(scratch1,
2514 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2515 j(not_equal, on_fail, near_jump);
2516 }
2517
2518
2519 template<class T>
JumpIfNotUniqueNameHelper(MacroAssembler * masm,T operand_or_register,Label * not_unique_name,Label::Distance distance)2520 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2521 T operand_or_register,
2522 Label* not_unique_name,
2523 Label::Distance distance) {
2524 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2525 Label succeed;
2526 masm->testb(operand_or_register,
2527 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2528 masm->j(zero, &succeed, Label::kNear);
2529 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2530 masm->j(not_equal, not_unique_name, distance);
2531
2532 masm->bind(&succeed);
2533 }
2534
2535
JumpIfNotUniqueNameInstanceType(Operand operand,Label * not_unique_name,Label::Distance distance)2536 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2537 Label* not_unique_name,
2538 Label::Distance distance) {
2539 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2540 }
2541
2542
JumpIfNotUniqueNameInstanceType(Register reg,Label * not_unique_name,Label::Distance distance)2543 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2544 Label* not_unique_name,
2545 Label::Distance distance) {
2546 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2547 }
2548
2549
Move(Register dst,Register src)2550 void MacroAssembler::Move(Register dst, Register src) {
2551 if (!dst.is(src)) {
2552 movp(dst, src);
2553 }
2554 }
2555
2556
Move(Register dst,Handle<Object> source)2557 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2558 AllowDeferredHandleDereference smi_check;
2559 if (source->IsSmi()) {
2560 Move(dst, Smi::cast(*source));
2561 } else {
2562 MoveHeapObject(dst, source);
2563 }
2564 }
2565
2566
Move(const Operand & dst,Handle<Object> source)2567 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2568 AllowDeferredHandleDereference smi_check;
2569 if (source->IsSmi()) {
2570 Move(dst, Smi::cast(*source));
2571 } else {
2572 MoveHeapObject(kScratchRegister, source);
2573 movp(dst, kScratchRegister);
2574 }
2575 }
2576
2577
Move(XMMRegister dst,uint32_t src)2578 void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2579 if (src == 0) {
2580 Xorpd(dst, dst);
2581 } else {
2582 unsigned pop = base::bits::CountPopulation32(src);
2583 DCHECK_NE(0u, pop);
2584 if (pop == 32) {
2585 Pcmpeqd(dst, dst);
2586 } else {
2587 movl(kScratchRegister, Immediate(src));
2588 Movq(dst, kScratchRegister);
2589 }
2590 }
2591 }
2592
2593
Move(XMMRegister dst,uint64_t src)2594 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2595 if (src == 0) {
2596 Xorpd(dst, dst);
2597 } else {
2598 unsigned nlz = base::bits::CountLeadingZeros64(src);
2599 unsigned ntz = base::bits::CountTrailingZeros64(src);
2600 unsigned pop = base::bits::CountPopulation64(src);
2601 DCHECK_NE(0u, pop);
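    // Recognize bit patterns that can be built without a general 64-bit load:
    // pop == 64 is all ones (pcmpeqd); pop + ntz == 64 is a run of ones
    // ending at bit 63 (all ones shifted left by ntz); pop + nlz == 64 is a
    // run of ones starting at bit 0 (all ones shifted right by nlz).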
2602 if (pop == 64) {
2603 Pcmpeqd(dst, dst);
2604 } else if (pop + ntz == 64) {
2605 Pcmpeqd(dst, dst);
2606 Psllq(dst, ntz);
2607 } else if (pop + nlz == 64) {
2608 Pcmpeqd(dst, dst);
2609 Psrlq(dst, nlz);
2610 } else {
2611 uint32_t lower = static_cast<uint32_t>(src);
2612 uint32_t upper = static_cast<uint32_t>(src >> 32);
2613 if (upper == 0) {
2614 Move(dst, lower);
2615 } else {
2616 movq(kScratchRegister, src);
2617 Movq(dst, kScratchRegister);
2618 }
2619 }
2620 }
2621 }
2622
2623
Movaps(XMMRegister dst,XMMRegister src)2624 void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
2625 if (CpuFeatures::IsSupported(AVX)) {
2626 CpuFeatureScope scope(this, AVX);
2627 vmovaps(dst, src);
2628 } else {
2629 movaps(dst, src);
2630 }
2631 }
2632
2633
Movapd(XMMRegister dst,XMMRegister src)2634 void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
2635 if (CpuFeatures::IsSupported(AVX)) {
2636 CpuFeatureScope scope(this, AVX);
2637 vmovapd(dst, src);
2638 } else {
2639 movapd(dst, src);
2640 }
2641 }
2642
2643
Movsd(XMMRegister dst,XMMRegister src)2644 void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
2645 if (CpuFeatures::IsSupported(AVX)) {
2646 CpuFeatureScope scope(this, AVX);
2647 vmovsd(dst, dst, src);
2648 } else {
2649 movsd(dst, src);
2650 }
2651 }
2652
2653
Movsd(XMMRegister dst,const Operand & src)2654 void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
2655 if (CpuFeatures::IsSupported(AVX)) {
2656 CpuFeatureScope scope(this, AVX);
2657 vmovsd(dst, src);
2658 } else {
2659 movsd(dst, src);
2660 }
2661 }
2662
2663
Movsd(const Operand & dst,XMMRegister src)2664 void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
2665 if (CpuFeatures::IsSupported(AVX)) {
2666 CpuFeatureScope scope(this, AVX);
2667 vmovsd(dst, src);
2668 } else {
2669 movsd(dst, src);
2670 }
2671 }
2672
2673
Movss(XMMRegister dst,XMMRegister src)2674 void MacroAssembler::Movss(XMMRegister dst, XMMRegister src) {
2675 if (CpuFeatures::IsSupported(AVX)) {
2676 CpuFeatureScope scope(this, AVX);
2677 vmovss(dst, dst, src);
2678 } else {
2679 movss(dst, src);
2680 }
2681 }
2682
2683
Movss(XMMRegister dst,const Operand & src)2684 void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
2685 if (CpuFeatures::IsSupported(AVX)) {
2686 CpuFeatureScope scope(this, AVX);
2687 vmovss(dst, src);
2688 } else {
2689 movss(dst, src);
2690 }
2691 }
2692
2693
Movss(const Operand & dst,XMMRegister src)2694 void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
2695 if (CpuFeatures::IsSupported(AVX)) {
2696 CpuFeatureScope scope(this, AVX);
2697 vmovss(dst, src);
2698 } else {
2699 movss(dst, src);
2700 }
2701 }
2702
2703
Movd(XMMRegister dst,Register src)2704 void MacroAssembler::Movd(XMMRegister dst, Register src) {
2705 if (CpuFeatures::IsSupported(AVX)) {
2706 CpuFeatureScope scope(this, AVX);
2707 vmovd(dst, src);
2708 } else {
2709 movd(dst, src);
2710 }
2711 }
2712
2713
Movd(XMMRegister dst,const Operand & src)2714 void MacroAssembler::Movd(XMMRegister dst, const Operand& src) {
2715 if (CpuFeatures::IsSupported(AVX)) {
2716 CpuFeatureScope scope(this, AVX);
2717 vmovd(dst, src);
2718 } else {
2719 movd(dst, src);
2720 }
2721 }
2722
2723
Movd(Register dst,XMMRegister src)2724 void MacroAssembler::Movd(Register dst, XMMRegister src) {
2725 if (CpuFeatures::IsSupported(AVX)) {
2726 CpuFeatureScope scope(this, AVX);
2727 vmovd(dst, src);
2728 } else {
2729 movd(dst, src);
2730 }
2731 }
2732
2733
Movq(XMMRegister dst,Register src)2734 void MacroAssembler::Movq(XMMRegister dst, Register src) {
2735 if (CpuFeatures::IsSupported(AVX)) {
2736 CpuFeatureScope scope(this, AVX);
2737 vmovq(dst, src);
2738 } else {
2739 movq(dst, src);
2740 }
2741 }
2742
2743
Movq(Register dst,XMMRegister src)2744 void MacroAssembler::Movq(Register dst, XMMRegister src) {
2745 if (CpuFeatures::IsSupported(AVX)) {
2746 CpuFeatureScope scope(this, AVX);
2747 vmovq(dst, src);
2748 } else {
2749 movq(dst, src);
2750 }
2751 }
2752
2753
Movmskpd(Register dst,XMMRegister src)2754 void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
2755 if (CpuFeatures::IsSupported(AVX)) {
2756 CpuFeatureScope scope(this, AVX);
2757 vmovmskpd(dst, src);
2758 } else {
2759 movmskpd(dst, src);
2760 }
2761 }
2762
2763
Roundss(XMMRegister dst,XMMRegister src,RoundingMode mode)2764 void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
2765 RoundingMode mode) {
2766 if (CpuFeatures::IsSupported(AVX)) {
2767 CpuFeatureScope scope(this, AVX);
2768 vroundss(dst, dst, src, mode);
2769 } else {
2770 roundss(dst, src, mode);
2771 }
2772 }
2773
2774
Roundsd(XMMRegister dst,XMMRegister src,RoundingMode mode)2775 void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
2776 RoundingMode mode) {
2777 if (CpuFeatures::IsSupported(AVX)) {
2778 CpuFeatureScope scope(this, AVX);
2779 vroundsd(dst, dst, src, mode);
2780 } else {
2781 roundsd(dst, src, mode);
2782 }
2783 }
2784
2785
Sqrtsd(XMMRegister dst,XMMRegister src)2786 void MacroAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
2787 if (CpuFeatures::IsSupported(AVX)) {
2788 CpuFeatureScope scope(this, AVX);
2789 vsqrtsd(dst, dst, src);
2790 } else {
2791 sqrtsd(dst, src);
2792 }
2793 }
2794
2795
Sqrtsd(XMMRegister dst,const Operand & src)2796 void MacroAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
2797 if (CpuFeatures::IsSupported(AVX)) {
2798 CpuFeatureScope scope(this, AVX);
2799 vsqrtsd(dst, dst, src);
2800 } else {
2801 sqrtsd(dst, src);
2802 }
2803 }
2804
2805
Ucomiss(XMMRegister src1,XMMRegister src2)2806 void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
2807 if (CpuFeatures::IsSupported(AVX)) {
2808 CpuFeatureScope scope(this, AVX);
2809 vucomiss(src1, src2);
2810 } else {
2811 ucomiss(src1, src2);
2812 }
2813 }
2814
2815
Ucomiss(XMMRegister src1,const Operand & src2)2816 void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
2817 if (CpuFeatures::IsSupported(AVX)) {
2818 CpuFeatureScope scope(this, AVX);
2819 vucomiss(src1, src2);
2820 } else {
2821 ucomiss(src1, src2);
2822 }
2823 }
2824
2825
Ucomisd(XMMRegister src1,XMMRegister src2)2826 void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
2827 if (CpuFeatures::IsSupported(AVX)) {
2828 CpuFeatureScope scope(this, AVX);
2829 vucomisd(src1, src2);
2830 } else {
2831 ucomisd(src1, src2);
2832 }
2833 }
2834
2835
Ucomisd(XMMRegister src1,const Operand & src2)2836 void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
2837 if (CpuFeatures::IsSupported(AVX)) {
2838 CpuFeatureScope scope(this, AVX);
2839 vucomisd(src1, src2);
2840 } else {
2841 ucomisd(src1, src2);
2842 }
2843 }
2844
2845
Cmp(Register dst,Handle<Object> source)2846 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2847 AllowDeferredHandleDereference smi_check;
2848 if (source->IsSmi()) {
2849 Cmp(dst, Smi::cast(*source));
2850 } else {
2851 MoveHeapObject(kScratchRegister, source);
2852 cmpp(dst, kScratchRegister);
2853 }
2854 }
2855
2856
Cmp(const Operand & dst,Handle<Object> source)2857 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2858 AllowDeferredHandleDereference smi_check;
2859 if (source->IsSmi()) {
2860 Cmp(dst, Smi::cast(*source));
2861 } else {
2862 MoveHeapObject(kScratchRegister, source);
2863 cmpp(dst, kScratchRegister);
2864 }
2865 }
2866
2867
Push(Handle<Object> source)2868 void MacroAssembler::Push(Handle<Object> source) {
2869 AllowDeferredHandleDereference smi_check;
2870 if (source->IsSmi()) {
2871 Push(Smi::cast(*source));
2872 } else {
2873 MoveHeapObject(kScratchRegister, source);
2874 Push(kScratchRegister);
2875 }
2876 }
2877
2878
MoveHeapObject(Register result,Handle<Object> object)2879 void MacroAssembler::MoveHeapObject(Register result,
2880 Handle<Object> object) {
2881 AllowDeferredHandleDereference using_raw_address;
2882 DCHECK(object->IsHeapObject());
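  // New-space objects can move, so they are loaded indirectly through a cell;
  // objects outside new space are embedded directly in the code.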
2883 if (isolate()->heap()->InNewSpace(*object)) {
2884 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2885 Move(result, cell, RelocInfo::CELL);
2886 movp(result, Operand(result, 0));
2887 } else {
2888 Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2889 }
2890 }
2891
2892
LoadGlobalCell(Register dst,Handle<Cell> cell)2893 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2894 if (dst.is(rax)) {
2895 AllowDeferredHandleDereference embedding_raw_address;
2896 load_rax(cell.location(), RelocInfo::CELL);
2897 } else {
2898 Move(dst, cell, RelocInfo::CELL);
2899 movp(dst, Operand(dst, 0));
2900 }
2901 }
2902
2903
CmpWeakValue(Register value,Handle<WeakCell> cell,Register scratch)2904 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2905 Register scratch) {
2906 Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
2907 cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
2908 }
2909
2910
GetWeakValue(Register value,Handle<WeakCell> cell)2911 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2912 Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
2913 movp(value, FieldOperand(value, WeakCell::kValueOffset));
2914 }
2915
2916
LoadWeakValue(Register value,Handle<WeakCell> cell,Label * miss)2917 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2918 Label* miss) {
2919 GetWeakValue(value, cell);
2920 JumpIfSmi(value, miss);
2921 }
2922
2923
Drop(int stack_elements)2924 void MacroAssembler::Drop(int stack_elements) {
2925 if (stack_elements > 0) {
2926 addp(rsp, Immediate(stack_elements * kPointerSize));
2927 }
2928 }
2929
2930
DropUnderReturnAddress(int stack_elements,Register scratch)2931 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
2932 Register scratch) {
2933 DCHECK(stack_elements > 0);
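  // A popq with a memory operand computes the destination address after rsp
  // has been incremented, so a single popq moves the return address up by one
  // slot and drops the word below it without needing the scratch register.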
2934 if (kPointerSize == kInt64Size && stack_elements == 1) {
2935 popq(MemOperand(rsp, 0));
2936 return;
2937 }
2938
2939 PopReturnAddressTo(scratch);
2940 Drop(stack_elements);
2941 PushReturnAddressFrom(scratch);
2942 }
2943
2944
Push(Register src)2945 void MacroAssembler::Push(Register src) {
2946 if (kPointerSize == kInt64Size) {
2947 pushq(src);
2948 } else {
2949 // x32 uses 64-bit push for rbp in the prologue.
2950 DCHECK(src.code() != rbp.code());
2951 leal(rsp, Operand(rsp, -4));
2952 movp(Operand(rsp, 0), src);
2953 }
2954 }
2955
2956
Push(const Operand & src)2957 void MacroAssembler::Push(const Operand& src) {
2958 if (kPointerSize == kInt64Size) {
2959 pushq(src);
2960 } else {
2961 movp(kScratchRegister, src);
2962 leal(rsp, Operand(rsp, -4));
2963 movp(Operand(rsp, 0), kScratchRegister);
2964 }
2965 }
2966
2967
PushQuad(const Operand & src)2968 void MacroAssembler::PushQuad(const Operand& src) {
2969 if (kPointerSize == kInt64Size) {
2970 pushq(src);
2971 } else {
2972 movp(kScratchRegister, src);
2973 pushq(kScratchRegister);
2974 }
2975 }
2976
2977
Push(Immediate value)2978 void MacroAssembler::Push(Immediate value) {
2979 if (kPointerSize == kInt64Size) {
2980 pushq(value);
2981 } else {
2982 leal(rsp, Operand(rsp, -4));
2983 movp(Operand(rsp, 0), value);
2984 }
2985 }
2986
2987
PushImm32(int32_t imm32)2988 void MacroAssembler::PushImm32(int32_t imm32) {
2989 if (kPointerSize == kInt64Size) {
2990 pushq_imm32(imm32);
2991 } else {
2992 leal(rsp, Operand(rsp, -4));
2993 movp(Operand(rsp, 0), Immediate(imm32));
2994 }
2995 }
2996
2997
Pop(Register dst)2998 void MacroAssembler::Pop(Register dst) {
2999 if (kPointerSize == kInt64Size) {
3000 popq(dst);
3001 } else {
3002 // x32 uses 64-bit pop for rbp in the epilogue.
3003 DCHECK(dst.code() != rbp.code());
3004 movp(dst, Operand(rsp, 0));
3005 leal(rsp, Operand(rsp, 4));
3006 }
3007 }
3008
3009
Pop(const Operand & dst)3010 void MacroAssembler::Pop(const Operand& dst) {
3011 if (kPointerSize == kInt64Size) {
3012 popq(dst);
3013 } else {
3014 Register scratch = dst.AddressUsesRegister(kScratchRegister)
3015 ? kRootRegister : kScratchRegister;
3016 movp(scratch, Operand(rsp, 0));
3017 movp(dst, scratch);
3018 leal(rsp, Operand(rsp, 4));
3019 if (scratch.is(kRootRegister)) {
3020 // Restore kRootRegister.
3021 InitializeRootRegister();
3022 }
3023 }
3024 }
3025
3026
PopQuad(const Operand & dst)3027 void MacroAssembler::PopQuad(const Operand& dst) {
3028 if (kPointerSize == kInt64Size) {
3029 popq(dst);
3030 } else {
3031 popq(kScratchRegister);
3032 movp(dst, kScratchRegister);
3033 }
3034 }
3035
3036
LoadSharedFunctionInfoSpecialField(Register dst,Register base,int offset)3037 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
3038 Register base,
3039 int offset) {
3040 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
3041 offset <= SharedFunctionInfo::kSize &&
3042 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3043 if (kPointerSize == kInt64Size) {
3044 movsxlq(dst, FieldOperand(base, offset));
3045 } else {
3046 movp(dst, FieldOperand(base, offset));
3047 SmiToInteger32(dst, dst);
3048 }
3049 }
3050
3051
TestBitSharedFunctionInfoSpecialField(Register base,int offset,int bits)3052 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
3053 int offset,
3054 int bits) {
3055 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
3056 offset <= SharedFunctionInfo::kSize &&
3057 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3058 if (kPointerSize == kInt32Size) {
3059     // On x32, this field is represented as a smi.
3060 bits += kSmiShift;
3061 }
3062 int byte_offset = bits / kBitsPerByte;
3063 int bit_in_byte = bits & (kBitsPerByte - 1);
3064 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
3065 }
3066
3067
Jump(ExternalReference ext)3068 void MacroAssembler::Jump(ExternalReference ext) {
3069 LoadAddress(kScratchRegister, ext);
3070 jmp(kScratchRegister);
3071 }
3072
3073
Jump(const Operand & op)3074 void MacroAssembler::Jump(const Operand& op) {
3075 if (kPointerSize == kInt64Size) {
3076 jmp(op);
3077 } else {
3078 movp(kScratchRegister, op);
3079 jmp(kScratchRegister);
3080 }
3081 }
3082
3083
Jump(Address destination,RelocInfo::Mode rmode)3084 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
3085 Move(kScratchRegister, destination, rmode);
3086 jmp(kScratchRegister);
3087 }
3088
3089
Jump(Handle<Code> code_object,RelocInfo::Mode rmode)3090 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
3091 // TODO(X64): Inline this
3092 jmp(code_object, rmode);
3093 }
3094
3095
CallSize(ExternalReference ext)3096 int MacroAssembler::CallSize(ExternalReference ext) {
3097   // Opcode for call kScratchRegister (r10) is: REX.B FF D2 (three bytes).
3098 return LoadAddressSize(ext) +
3099 Assembler::kCallScratchRegisterInstructionLength;
3100 }
3101
3102
Call(ExternalReference ext)3103 void MacroAssembler::Call(ExternalReference ext) {
3104 #ifdef DEBUG
3105 int end_position = pc_offset() + CallSize(ext);
3106 #endif
3107 LoadAddress(kScratchRegister, ext);
3108 call(kScratchRegister);
3109 #ifdef DEBUG
3110 CHECK_EQ(end_position, pc_offset());
3111 #endif
3112 }
3113
3114
Call(const Operand & op)3115 void MacroAssembler::Call(const Operand& op) {
3116 if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
3117 call(op);
3118 } else {
3119 movp(kScratchRegister, op);
3120 call(kScratchRegister);
3121 }
3122 }
3123
3124
Call(Address destination,RelocInfo::Mode rmode)3125 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
3126 #ifdef DEBUG
3127 int end_position = pc_offset() + CallSize(destination);
3128 #endif
3129 Move(kScratchRegister, destination, rmode);
3130 call(kScratchRegister);
3131 #ifdef DEBUG
3132 CHECK_EQ(pc_offset(), end_position);
3133 #endif
3134 }
3135
3136
Call(Handle<Code> code_object,RelocInfo::Mode rmode,TypeFeedbackId ast_id)3137 void MacroAssembler::Call(Handle<Code> code_object,
3138 RelocInfo::Mode rmode,
3139 TypeFeedbackId ast_id) {
3140 #ifdef DEBUG
3141 int end_position = pc_offset() + CallSize(code_object);
3142 #endif
3143 DCHECK(RelocInfo::IsCodeTarget(rmode) ||
3144 rmode == RelocInfo::CODE_AGE_SEQUENCE);
3145 call(code_object, rmode, ast_id);
3146 #ifdef DEBUG
3147 CHECK_EQ(end_position, pc_offset());
3148 #endif
3149 }
3150
3151
Pextrd(Register dst,XMMRegister src,int8_t imm8)3152 void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
3153 if (imm8 == 0) {
3154 Movd(dst, src);
3155 return;
3156 }
3157 DCHECK_EQ(1, imm8);
3158 if (CpuFeatures::IsSupported(SSE4_1)) {
3159 CpuFeatureScope sse_scope(this, SSE4_1);
3160 pextrd(dst, src, imm8);
3161 return;
3162 }
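  // Without SSE4.1, copy the low 64 bits to a general register and shift the
  // requested lane (lane 1, given the DCHECK above) into the low 32 bits.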
3163 movq(dst, src);
3164 shrq(dst, Immediate(32));
3165 }
3166
3167
Pinsrd(XMMRegister dst,Register src,int8_t imm8)3168 void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
3169 if (CpuFeatures::IsSupported(SSE4_1)) {
3170 CpuFeatureScope sse_scope(this, SSE4_1);
3171 pinsrd(dst, src, imm8);
3172 return;
3173 }
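  // Without SSE4.1, place src in the low lane of xmm0 and either interleave
  // it into lane 1 of dst (punpckldq) or overwrite lane 0 (Movss). Note that
  // this fallback clobbers xmm0.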
3174 Movd(xmm0, src);
3175 if (imm8 == 1) {
3176 punpckldq(dst, xmm0);
3177 } else {
3178 DCHECK_EQ(0, imm8);
3179 Movss(dst, xmm0);
3180 }
3181 }
3182
3183
Pinsrd(XMMRegister dst,const Operand & src,int8_t imm8)3184 void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
3185 DCHECK(imm8 == 0 || imm8 == 1);
3186 if (CpuFeatures::IsSupported(SSE4_1)) {
3187 CpuFeatureScope sse_scope(this, SSE4_1);
3188 pinsrd(dst, src, imm8);
3189 return;
3190 }
3191 Movd(xmm0, src);
3192 if (imm8 == 1) {
3193 punpckldq(dst, xmm0);
3194 } else {
3195 DCHECK_EQ(0, imm8);
3196 Movss(dst, xmm0);
3197 }
3198 }
3199
3200
Lzcntl(Register dst,Register src)3201 void MacroAssembler::Lzcntl(Register dst, Register src) {
3202 if (CpuFeatures::IsSupported(LZCNT)) {
3203 CpuFeatureScope scope(this, LZCNT);
3204 lzcntl(dst, src);
3205 return;
3206 }
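  // Fallback via bsr: for a non-zero source, bsr yields the index of the
  // highest set bit and 31 ^ index == 31 - index is the leading-zero count.
  // Seeding dst with 63 makes the zero-source case come out as 63 ^ 31 == 32.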
3207 Label not_zero_src;
3208 bsrl(dst, src);
3209   j(not_zero, &not_zero_src, Label::kNear);
3210 Set(dst, 63); // 63^31 == 32
3211   bind(&not_zero_src);
3212 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
3213 }
3214
3215
Lzcntl(Register dst,const Operand & src)3216 void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
3217 if (CpuFeatures::IsSupported(LZCNT)) {
3218 CpuFeatureScope scope(this, LZCNT);
3219 lzcntl(dst, src);
3220 return;
3221 }
3222 Label not_zero_src;
3223 bsrl(dst, src);
3224   j(not_zero, &not_zero_src, Label::kNear);
3225 Set(dst, 63); // 63^31 == 32
3226   bind(&not_zero_src);
3227 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
3228 }
3229
3230
Lzcntq(Register dst,Register src)3231 void MacroAssembler::Lzcntq(Register dst, Register src) {
3232 if (CpuFeatures::IsSupported(LZCNT)) {
3233 CpuFeatureScope scope(this, LZCNT);
3234 lzcntq(dst, src);
3235 return;
3236 }
3237 Label not_zero_src;
3238 bsrq(dst, src);
3239   j(not_zero, &not_zero_src, Label::kNear);
3240 Set(dst, 127); // 127^63 == 64
3241   bind(&not_zero_src);
3242 xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
3243 }
3244
3245
Lzcntq(Register dst,const Operand & src)3246 void MacroAssembler::Lzcntq(Register dst, const Operand& src) {
3247 if (CpuFeatures::IsSupported(LZCNT)) {
3248 CpuFeatureScope scope(this, LZCNT);
3249 lzcntq(dst, src);
3250 return;
3251 }
3252 Label not_zero_src;
3253 bsrq(dst, src);
3254   j(not_zero, &not_zero_src, Label::kNear);
3255 Set(dst, 127); // 127^63 == 64
3256   bind(&not_zero_src);
3257 xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
3258 }
3259
3260
Tzcntq(Register dst,Register src)3261 void MacroAssembler::Tzcntq(Register dst, Register src) {
3262 if (CpuFeatures::IsSupported(BMI1)) {
3263 CpuFeatureScope scope(this, BMI1);
3264 tzcntq(dst, src);
3265 return;
3266 }
3267 Label not_zero_src;
3268 bsfq(dst, src);
3269   j(not_zero, &not_zero_src, Label::kNear);
3270 // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
3271 Set(dst, 64);
3272   bind(&not_zero_src);
3273 }
3274
3275
Tzcntq(Register dst,const Operand & src)3276 void MacroAssembler::Tzcntq(Register dst, const Operand& src) {
3277 if (CpuFeatures::IsSupported(BMI1)) {
3278 CpuFeatureScope scope(this, BMI1);
3279 tzcntq(dst, src);
3280 return;
3281 }
3282 Label not_zero_src;
3283 bsfq(dst, src);
3284   j(not_zero, &not_zero_src, Label::kNear);
3285 // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
3286 Set(dst, 64);
3287   bind(&not_zero_src);
3288 }
3289
3290
Tzcntl(Register dst,Register src)3291 void MacroAssembler::Tzcntl(Register dst, Register src) {
3292 if (CpuFeatures::IsSupported(BMI1)) {
3293 CpuFeatureScope scope(this, BMI1);
3294 tzcntl(dst, src);
3295 return;
3296 }
3297 Label not_zero_src;
3298 bsfl(dst, src);
3299   j(not_zero, &not_zero_src, Label::kNear);
3300 Set(dst, 32); // The result of tzcnt is 32 if src = 0.
3301   bind(&not_zero_src);
3302 }
3303
3304
Tzcntl(Register dst,const Operand & src)3305 void MacroAssembler::Tzcntl(Register dst, const Operand& src) {
3306 if (CpuFeatures::IsSupported(BMI1)) {
3307 CpuFeatureScope scope(this, BMI1);
3308 tzcntl(dst, src);
3309 return;
3310 }
3311 Label not_zero_src;
3312 bsfl(dst, src);
3313   j(not_zero, &not_zero_src, Label::kNear);
3314 Set(dst, 32); // The result of tzcnt is 32 if src = 0.
3315   bind(&not_zero_src);
3316 }
3317
3318
Popcntl(Register dst,Register src)3319 void MacroAssembler::Popcntl(Register dst, Register src) {
3320 if (CpuFeatures::IsSupported(POPCNT)) {
3321 CpuFeatureScope scope(this, POPCNT);
3322 popcntl(dst, src);
3323 return;
3324 }
3325 UNREACHABLE();
3326 }
3327
3328
Popcntl(Register dst,const Operand & src)3329 void MacroAssembler::Popcntl(Register dst, const Operand& src) {
3330 if (CpuFeatures::IsSupported(POPCNT)) {
3331 CpuFeatureScope scope(this, POPCNT);
3332 popcntl(dst, src);
3333 return;
3334 }
3335 UNREACHABLE();
3336 }
3337
3338
Popcntq(Register dst,Register src)3339 void MacroAssembler::Popcntq(Register dst, Register src) {
3340 if (CpuFeatures::IsSupported(POPCNT)) {
3341 CpuFeatureScope scope(this, POPCNT);
3342 popcntq(dst, src);
3343 return;
3344 }
3345 UNREACHABLE();
3346 }
3347
3348
Popcntq(Register dst,const Operand & src)3349 void MacroAssembler::Popcntq(Register dst, const Operand& src) {
3350 if (CpuFeatures::IsSupported(POPCNT)) {
3351 CpuFeatureScope scope(this, POPCNT);
3352 popcntq(dst, src);
3353 return;
3354 }
3355 UNREACHABLE();
3356 }
3357
3358
Pushad()3359 void MacroAssembler::Pushad() {
3360 Push(rax);
3361 Push(rcx);
3362 Push(rdx);
3363 Push(rbx);
3364 // Not pushing rsp or rbp.
3365 Push(rsi);
3366 Push(rdi);
3367 Push(r8);
3368 Push(r9);
3369 // r10 is kScratchRegister.
3370 Push(r11);
3371 Push(r12);
3372 // r13 is kRootRegister.
3373 Push(r14);
3374 Push(r15);
3375 STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
3376 // Use lea for symmetry with Popad.
3377 int sp_delta =
3378 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3379 leap(rsp, Operand(rsp, -sp_delta));
3380 }
3381
3382
Popad()3383 void MacroAssembler::Popad() {
3384 // Popad must not change the flags, so use lea instead of addq.
3385 int sp_delta =
3386 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3387 leap(rsp, Operand(rsp, sp_delta));
3388 Pop(r15);
3389 Pop(r14);
3390 Pop(r12);
3391 Pop(r11);
3392 Pop(r9);
3393 Pop(r8);
3394 Pop(rdi);
3395 Pop(rsi);
3396 Pop(rbx);
3397 Pop(rdx);
3398 Pop(rcx);
3399 Pop(rax);
3400 }
3401
3402
Dropad()3403 void MacroAssembler::Dropad() {
3404 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
3405 }
3406
3407
3408 // Order in which general registers are pushed by Pushad:
3409 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
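// Entries of -1 mark registers that Pushad does not save (rsp, rbp,
// r10 == kScratchRegister and r13 == kRootRegister); the other entries give
// the register's position in the Pushad push order (rax first).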
3410 const int
3411 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
3412 0,
3413 1,
3414 2,
3415 3,
3416 -1,
3417 -1,
3418 4,
3419 5,
3420 6,
3421 7,
3422 -1,
3423 8,
3424 9,
3425 -1,
3426 10,
3427 11
3428 };
3429
3430
StoreToSafepointRegisterSlot(Register dst,const Immediate & imm)3431 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
3432 const Immediate& imm) {
3433 movp(SafepointRegisterSlot(dst), imm);
3434 }
3435
3436
StoreToSafepointRegisterSlot(Register dst,Register src)3437 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
3438 movp(SafepointRegisterSlot(dst), src);
3439 }
3440
3441
LoadFromSafepointRegisterSlot(Register dst,Register src)3442 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
3443 movp(dst, SafepointRegisterSlot(src));
3444 }
3445
3446
SafepointRegisterSlot(Register reg)3447 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3448 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3449 }
3450
3451
PushStackHandler()3452 void MacroAssembler::PushStackHandler() {
3453 // Adjust this code if not the case.
3454 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3455 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3456
3457 // Link the current handler as the next handler.
3458 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3459 Push(ExternalOperand(handler_address));
3460
3461 // Set this new handler as the current one.
3462 movp(ExternalOperand(handler_address), rsp);
3463 }
3464
3465
PopStackHandler()3466 void MacroAssembler::PopStackHandler() {
3467 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3468 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3469 Pop(ExternalOperand(handler_address));
3470 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3471 }
3472
3473
Ret()3474 void MacroAssembler::Ret() {
3475 ret(0);
3476 }
3477
3478
Ret(int bytes_dropped,Register scratch)3479 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3480 if (is_uint16(bytes_dropped)) {
3481 ret(bytes_dropped);
3482 } else {
3483 PopReturnAddressTo(scratch);
3484 addp(rsp, Immediate(bytes_dropped));
3485 PushReturnAddressFrom(scratch);
3486 ret(0);
3487 }
3488 }
3489
3490
FCmp()3491 void MacroAssembler::FCmp() {
3492 fucomip();
3493 fstp(0);
3494 }
3495
3496
CmpObjectType(Register heap_object,InstanceType type,Register map)3497 void MacroAssembler::CmpObjectType(Register heap_object,
3498 InstanceType type,
3499 Register map) {
3500 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3501 CmpInstanceType(map, type);
3502 }
3503
3504
CmpInstanceType(Register map,InstanceType type)3505 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3506 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3507 Immediate(static_cast<int8_t>(type)));
3508 }
3509
3510
CheckFastElements(Register map,Label * fail,Label::Distance distance)3511 void MacroAssembler::CheckFastElements(Register map,
3512 Label* fail,
3513 Label::Distance distance) {
3514 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3515 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3516 STATIC_ASSERT(FAST_ELEMENTS == 2);
3517 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3518 cmpb(FieldOperand(map, Map::kBitField2Offset),
3519 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3520 j(above, fail, distance);
3521 }
3522
3523
CheckFastObjectElements(Register map,Label * fail,Label::Distance distance)3524 void MacroAssembler::CheckFastObjectElements(Register map,
3525 Label* fail,
3526 Label::Distance distance) {
3527 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3528 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3529 STATIC_ASSERT(FAST_ELEMENTS == 2);
3530 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3531 cmpb(FieldOperand(map, Map::kBitField2Offset),
3532 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3533 j(below_equal, fail, distance);
3534 cmpb(FieldOperand(map, Map::kBitField2Offset),
3535 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3536 j(above, fail, distance);
3537 }
3538
3539
3540 void MacroAssembler::CheckFastSmiElements(Register map,
3541 Label* fail,
3542 Label::Distance distance) {
3543 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3544 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3545 cmpb(FieldOperand(map, Map::kBitField2Offset),
3546 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3547 j(above, fail, distance);
3548 }
3549
3550
3551 void MacroAssembler::StoreNumberToDoubleElements(
3552 Register maybe_number,
3553 Register elements,
3554 Register index,
3555 XMMRegister xmm_scratch,
3556 Label* fail,
3557 int elements_offset) {
3558 Label smi_value, done;
3559
3560 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3561
3562 CheckMap(maybe_number,
3563 isolate()->factory()->heap_number_map(),
3564 fail,
3565 DONT_DO_SMI_CHECK);
3566
3567 // Double value, turn potential sNaN into qNaN.
3568 Move(xmm_scratch, 1.0);
3569 mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3570 jmp(&done, Label::kNear);
3571
3572 bind(&smi_value);
3573 // Value is a smi. Convert to a double and store.
3574 // Preserve original value.
3575 SmiToInteger32(kScratchRegister, maybe_number);
3576 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3577 bind(&done);
3578 Movsd(FieldOperand(elements, index, times_8,
3579 FixedDoubleArray::kHeaderSize - elements_offset),
3580 xmm_scratch);
3581 }
3582
3583
3584 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3585 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3586 }
3587
3588
3589 void MacroAssembler::CheckMap(Register obj,
3590 Handle<Map> map,
3591 Label* fail,
3592 SmiCheckType smi_check_type) {
3593 if (smi_check_type == DO_SMI_CHECK) {
3594 JumpIfSmi(obj, fail);
3595 }
3596
3597 CompareMap(obj, map);
3598 j(not_equal, fail);
3599 }
3600
3601
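// Clamps a signed 32-bit integer in 'reg' to the range [0, 255] without
// branching on the in-range case: if any bit outside the low byte is set,
// the value is out of range and is mapped to 0 (negative inputs) or 255
// (inputs above 255) by the setcc/decb pair below.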
3602 void MacroAssembler::ClampUint8(Register reg) {
3603 Label done;
3604 testl(reg, Immediate(0xFFFFFF00));
3605 j(zero, &done, Label::kNear);
3606 setcc(negative, reg); // 1 if negative, 0 if positive.
3607 decb(reg); // 0 if negative, 255 if positive.
3608 bind(&done);
3609 }
3610
3611
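// Converts a double to an integer and clamps it to [0, 255]. Cvtsd2si
// returns 0x80000000 when the conversion fails (NaN or out of int32 range);
// the cmpl/j(overflow) pair below detects exactly that sentinel and takes
// the conv_failure path, which maps NaN and negative inputs to 0 and large
// inputs to 255.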
3612 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3613 XMMRegister temp_xmm_reg,
3614 Register result_reg) {
3615 Label done;
3616 Label conv_failure;
3617 Xorpd(temp_xmm_reg, temp_xmm_reg);
3618 Cvtsd2si(result_reg, input_reg);
3619 testl(result_reg, Immediate(0xFFFFFF00));
3620 j(zero, &done, Label::kNear);
3621 cmpl(result_reg, Immediate(1));
3622 j(overflow, &conv_failure, Label::kNear);
3623 movl(result_reg, Immediate(0));
3624 setcc(sign, result_reg);
3625 subl(result_reg, Immediate(1));
3626 andl(result_reg, Immediate(255));
3627 jmp(&done, Label::kNear);
3628 bind(&conv_failure);
3629 Set(result_reg, 0);
3630 Ucomisd(input_reg, temp_xmm_reg);
3631 j(below, &done, Label::kNear);
3632 Set(result_reg, 255);
3633 bind(&done);
3634 }
3635
3636
3637 void MacroAssembler::LoadUint32(XMMRegister dst,
3638 Register src) {
3639 if (FLAG_debug_code) {
3640 cmpq(src, Immediate(0xffffffff));
3641 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3642 }
3643 Cvtqsi2sd(dst, src);
3644 }
3645
3646
3647 void MacroAssembler::SlowTruncateToI(Register result_reg,
3648 Register input_reg,
3649 int offset) {
3650 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3651 call(stub.GetCode(), RelocInfo::CODE_TARGET);
3652 }
3653
3654
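// The two truncation helpers below rely on the same trick: Cvttsd2siq
// produces 0x8000000000000000 when the double cannot be represented as an
// int64, and cmpq(result, Immediate(1)) overflows only for that sentinel,
// so only the sentinel falls through to the DoubleToIStub slow path.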
3655 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3656 Register input_reg) {
3657 Label done;
3658 Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3659 Cvttsd2siq(result_reg, xmm0);
3660 cmpq(result_reg, Immediate(1));
3661 j(no_overflow, &done, Label::kNear);
3662
3663 // Slow case.
3664 if (input_reg.is(result_reg)) {
3665 subp(rsp, Immediate(kDoubleSize));
3666 Movsd(MemOperand(rsp, 0), xmm0);
3667 SlowTruncateToI(result_reg, rsp, 0);
3668 addp(rsp, Immediate(kDoubleSize));
3669 } else {
3670 SlowTruncateToI(result_reg, input_reg);
3671 }
3672
3673 bind(&done);
3674 // Keep our invariant that the upper 32 bits are zero.
3675 movl(result_reg, result_reg);
3676 }
3677
3678
3679 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3680 XMMRegister input_reg) {
3681 Label done;
3682 Cvttsd2siq(result_reg, input_reg);
3683 cmpq(result_reg, Immediate(1));
3684 j(no_overflow, &done, Label::kNear);
3685
3686 subp(rsp, Immediate(kDoubleSize));
3687 Movsd(MemOperand(rsp, 0), input_reg);
3688 SlowTruncateToI(result_reg, rsp, 0);
3689 addp(rsp, Immediate(kDoubleSize));
3690
3691 bind(&done);
3692 // Keep our invariant that the upper 32 bits are zero.
3693 movl(result_reg, result_reg);
3694 }
3695
3696
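// Converts a double to an int32 and bails out if the value cannot be
// represented exactly: the integer result is converted back to a double and
// compared with the input (parity_even after Ucomisd flags a NaN input),
// and, when requested, a zero result is additionally checked against the
// sign bit to distinguish +0 from -0.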
3697 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
3698 XMMRegister scratch,
3699 MinusZeroMode minus_zero_mode,
3700 Label* lost_precision, Label* is_nan,
3701 Label* minus_zero, Label::Distance dst) {
3702 Cvttsd2si(result_reg, input_reg);
3703 Cvtlsi2sd(xmm0, result_reg);
3704 Ucomisd(xmm0, input_reg);
3705 j(not_equal, lost_precision, dst);
3706 j(parity_even, is_nan, dst); // NaN.
3707 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3708 Label done;
3709 // The integer converted back is equal to the original. We
3710 // only have to test if we got -0 as an input.
3711 testl(result_reg, result_reg);
3712 j(not_zero, &done, Label::kNear);
3713 Movmskpd(result_reg, input_reg);
3714 // Bit 0 contains the sign of the double in input_reg.
3715 // If input was positive, we are ok and return 0, otherwise
3716 // jump to minus_zero.
3717 andl(result_reg, Immediate(1));
3718 j(not_zero, minus_zero, dst);
3719 bind(&done);
3720 }
3721 }
3722
3723
3724 void MacroAssembler::LoadInstanceDescriptors(Register map,
3725 Register descriptors) {
3726 movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3727 }
3728
3729
3730 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3731 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3732 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3733 }
3734
3735
3736 void MacroAssembler::EnumLength(Register dst, Register map) {
3737 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3738 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3739 andl(dst, Immediate(Map::EnumLengthBits::kMask));
3740 Integer32ToSmi(dst, dst);
3741 }
3742
3743
3744 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3745 int accessor_index,
3746 AccessorComponent accessor) {
3747 movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
3748 LoadInstanceDescriptors(dst, dst);
3749 movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3750 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
3751 : AccessorPair::kSetterOffset;
3752 movp(dst, FieldOperand(dst, offset));
3753 }
3754
3755
3756 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3757 Register scratch2, Handle<WeakCell> cell,
3758 Handle<Code> success,
3759 SmiCheckType smi_check_type) {
3760 Label fail;
3761 if (smi_check_type == DO_SMI_CHECK) {
3762 JumpIfSmi(obj, &fail);
3763 }
3764 movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
3765 CmpWeakValue(scratch1, cell, scratch2);
3766 j(equal, success, RelocInfo::CODE_TARGET);
3767 bind(&fail);
3768 }
3769
3770
3771 void MacroAssembler::AssertNumber(Register object) {
3772 if (emit_debug_code()) {
3773 Label ok;
3774 Condition is_smi = CheckSmi(object);
3775 j(is_smi, &ok, Label::kNear);
3776 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3777 isolate()->factory()->heap_number_map());
3778 Check(equal, kOperandIsNotANumber);
3779 bind(&ok);
3780 }
3781 }
3782
3783
3784 void MacroAssembler::AssertNotSmi(Register object) {
3785 if (emit_debug_code()) {
3786 Condition is_smi = CheckSmi(object);
3787 Check(NegateCondition(is_smi), kOperandIsASmi);
3788 }
3789 }
3790
3791
3792 void MacroAssembler::AssertSmi(Register object) {
3793 if (emit_debug_code()) {
3794 Condition is_smi = CheckSmi(object);
3795 Check(is_smi, kOperandIsNotASmi);
3796 }
3797 }
3798
3799
3800 void MacroAssembler::AssertSmi(const Operand& object) {
3801 if (emit_debug_code()) {
3802 Condition is_smi = CheckSmi(object);
3803 Check(is_smi, kOperandIsNotASmi);
3804 }
3805 }
3806
3807
3808 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3809 if (emit_debug_code()) {
3810 DCHECK(!int32_register.is(kScratchRegister));
3811 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3812 cmpq(kScratchRegister, int32_register);
3813 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3814 }
3815 }
3816
3817
3818 void MacroAssembler::AssertString(Register object) {
3819 if (emit_debug_code()) {
3820 testb(object, Immediate(kSmiTagMask));
3821 Check(not_equal, kOperandIsASmiAndNotAString);
3822 Push(object);
3823 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3824 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3825 Pop(object);
3826 Check(below, kOperandIsNotAString);
3827 }
3828 }
3829
3830
3831 void MacroAssembler::AssertName(Register object) {
3832 if (emit_debug_code()) {
3833 testb(object, Immediate(kSmiTagMask));
3834 Check(not_equal, kOperandIsASmiAndNotAName);
3835 Push(object);
3836 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3837 CmpInstanceType(object, LAST_NAME_TYPE);
3838 Pop(object);
3839 Check(below_equal, kOperandIsNotAName);
3840 }
3841 }
3842
3843
3844 void MacroAssembler::AssertFunction(Register object) {
3845 if (emit_debug_code()) {
3846 testb(object, Immediate(kSmiTagMask));
3847 Check(not_equal, kOperandIsASmiAndNotAFunction);
3848 Push(object);
3849 CmpObjectType(object, JS_FUNCTION_TYPE, object);
3850 Pop(object);
3851 Check(equal, kOperandIsNotAFunction);
3852 }
3853 }
3854
3855
3856 void MacroAssembler::AssertBoundFunction(Register object) {
3857 if (emit_debug_code()) {
3858 testb(object, Immediate(kSmiTagMask));
3859 Check(not_equal, kOperandIsASmiAndNotABoundFunction);
3860 Push(object);
3861 CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
3862 Pop(object);
3863 Check(equal, kOperandIsNotABoundFunction);
3864 }
3865 }
3866
3867
3868 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3869 if (emit_debug_code()) {
3870 Label done_checking;
3871 AssertNotSmi(object);
3872 Cmp(object, isolate()->factory()->undefined_value());
3873 j(equal, &done_checking);
3874 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3875 Assert(equal, kExpectedUndefinedOrCell);
3876 bind(&done_checking);
3877 }
3878 }
3879
3880
3881 void MacroAssembler::AssertRootValue(Register src,
3882 Heap::RootListIndex root_value_index,
3883 BailoutReason reason) {
3884 if (emit_debug_code()) {
3885 DCHECK(!src.is(kScratchRegister));
3886 LoadRoot(kScratchRegister, root_value_index);
3887 cmpp(src, kScratchRegister);
3888 Check(equal, reason);
3889 }
3890 }
3891
3892
3893
3894 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3895 Register map,
3896 Register instance_type) {
3897 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3898 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3899 STATIC_ASSERT(kNotStringTag != 0);
3900 testb(instance_type, Immediate(kIsNotStringMask));
3901 return zero;
3902 }
3903
3904
3905 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3906 Register map,
3907 Register instance_type) {
3908 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3909 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3910 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3911 return below_equal;
3912 }
3913
3914
3915 void MacroAssembler::GetMapConstructor(Register result, Register map,
3916 Register temp) {
3917 Label done, loop;
3918 movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
3919 bind(&loop);
3920 JumpIfSmi(result, &done, Label::kNear);
3921 CmpObjectType(result, MAP_TYPE, temp);
3922 j(not_equal, &done, Label::kNear);
3923 movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
3924 jmp(&loop);
3925 bind(&done);
3926 }
3927
3928
3929 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
3930 Label* miss) {
3931 // Get the prototype or initial map from the function.
3932 movp(result,
3933 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3934
3935 // If the prototype or initial map is the hole, don't return it and
3936 // simply miss the cache instead. This will allow us to allocate a
3937 // prototype object on-demand in the runtime system.
3938 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3939 j(equal, miss);
3940
3941 // If the function does not have an initial map, we're done.
3942 Label done;
3943 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3944 j(not_equal, &done, Label::kNear);
3945
3946 // Get the prototype from the initial map.
3947 movp(result, FieldOperand(result, Map::kPrototypeOffset));
3948
3949 // All done.
3950 bind(&done);
3951 }
3952
3953
3954 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3955 if (FLAG_native_code_counters && counter->Enabled()) {
3956 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3957 movl(counter_operand, Immediate(value));
3958 }
3959 }
3960
3961
3962 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3963 DCHECK(value > 0);
3964 if (FLAG_native_code_counters && counter->Enabled()) {
3965 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3966 if (value == 1) {
3967 incl(counter_operand);
3968 } else {
3969 addl(counter_operand, Immediate(value));
3970 }
3971 }
3972 }
3973
3974
3975 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3976 DCHECK(value > 0);
3977 if (FLAG_native_code_counters && counter->Enabled()) {
3978 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3979 if (value == 1) {
3980 decl(counter_operand);
3981 } else {
3982 subl(counter_operand, Immediate(value));
3983 }
3984 }
3985 }
3986
3987
3988 void MacroAssembler::DebugBreak() {
3989 Set(rax, 0); // No arguments.
3990 LoadAddress(rbx,
3991 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
3992 CEntryStub ces(isolate(), 1);
3993 DCHECK(AllowThisStubCall(&ces));
3994 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
3995 }
3996
3997
3998 void MacroAssembler::InvokeFunction(Register function,
3999 Register new_target,
4000 const ParameterCount& actual,
4001 InvokeFlag flag,
4002 const CallWrapper& call_wrapper) {
4003 movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
4004 LoadSharedFunctionInfoSpecialField(
4005 rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
4006
4007 ParameterCount expected(rbx);
4008 InvokeFunction(function, new_target, expected, actual, flag, call_wrapper);
4009 }
4010
4011
4012 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4013 const ParameterCount& expected,
4014 const ParameterCount& actual,
4015 InvokeFlag flag,
4016 const CallWrapper& call_wrapper) {
4017 Move(rdi, function);
4018 InvokeFunction(rdi, no_reg, expected, actual, flag, call_wrapper);
4019 }
4020
4021
4022 void MacroAssembler::InvokeFunction(Register function,
4023 Register new_target,
4024 const ParameterCount& expected,
4025 const ParameterCount& actual,
4026 InvokeFlag flag,
4027 const CallWrapper& call_wrapper) {
4028 DCHECK(function.is(rdi));
4029 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
4030 InvokeFunctionCode(rdi, new_target, expected, actual, flag, call_wrapper);
4031 }
4032
4033
4034 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4035 const ParameterCount& expected,
4036 const ParameterCount& actual,
4037 InvokeFlag flag,
4038 const CallWrapper& call_wrapper) {
4039 // You can't call a function without a valid frame.
4040 DCHECK(flag == JUMP_FUNCTION || has_frame());
4041 DCHECK(function.is(rdi));
4042 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));
4043
4044 if (call_wrapper.NeedsDebugStepCheck()) {
4045 FloodFunctionIfStepping(function, new_target, expected, actual);
4046 }
4047
4048 // Clear the new.target register if not given.
4049 if (!new_target.is_valid()) {
4050 LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
4051 }
4052
4053 Label done;
4054 bool definitely_mismatches = false;
4055 InvokePrologue(expected,
4056 actual,
4057 &done,
4058 &definitely_mismatches,
4059 flag,
4060 Label::kNear,
4061 call_wrapper);
4062 if (!definitely_mismatches) {
4063 // We call indirectly through the code field in the function to
4064 // allow recompilation to take effect without changing any of the
4065 // call sites.
4066 Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
4067 if (flag == CALL_FUNCTION) {
4068 call_wrapper.BeforeCall(CallSize(code));
4069 call(code);
4070 call_wrapper.AfterCall();
4071 } else {
4072 DCHECK(flag == JUMP_FUNCTION);
4073 jmp(code);
4074 }
4075 bind(&done);
4076 }
4077 }
4078
4079
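// Shared prologue for the InvokeFunction/InvokeFunctionCode paths above.
// The actual argument count ends up in rax and the expected count in rbx;
// when the two are known to match (or the callee uses the don't-adapt
// sentinel) the call proceeds directly, otherwise control is routed through
// the ArgumentsAdaptorTrampoline builtin.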
4080 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4081 const ParameterCount& actual,
4082 Label* done,
4083 bool* definitely_mismatches,
4084 InvokeFlag flag,
4085 Label::Distance near_jump,
4086 const CallWrapper& call_wrapper) {
4087 bool definitely_matches = false;
4088 *definitely_mismatches = false;
4089 Label invoke;
4090 if (expected.is_immediate()) {
4091 DCHECK(actual.is_immediate());
4092 Set(rax, actual.immediate());
4093 if (expected.immediate() == actual.immediate()) {
4094 definitely_matches = true;
4095 } else {
4096 if (expected.immediate() ==
4097 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
4098 // Don't worry about adapting arguments for built-ins that
4099 // don't want that done. Skip the adaptation code by making it look
4100 // like we have a match between expected and actual number of
4101 // arguments.
4102 definitely_matches = true;
4103 } else {
4104 *definitely_mismatches = true;
4105 Set(rbx, expected.immediate());
4106 }
4107 }
4108 } else {
4109 if (actual.is_immediate()) {
4110 // Expected is in register, actual is immediate. This is the
4111 // case when we invoke function values without going through the
4112 // IC mechanism.
4113 Set(rax, actual.immediate());
4114 cmpp(expected.reg(), Immediate(actual.immediate()));
4115 j(equal, &invoke, Label::kNear);
4116 DCHECK(expected.reg().is(rbx));
4117 } else if (!expected.reg().is(actual.reg())) {
4118 // Both expected and actual are in (different) registers. This
4119 // is the case when we invoke functions using call and apply.
4120 cmpp(expected.reg(), actual.reg());
4121 j(equal, &invoke, Label::kNear);
4122 DCHECK(actual.reg().is(rax));
4123 DCHECK(expected.reg().is(rbx));
4124 } else {
4125 Move(rax, actual.reg());
4126 }
4127 }
4128
4129 if (!definitely_matches) {
4130 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
4131 if (flag == CALL_FUNCTION) {
4132 call_wrapper.BeforeCall(CallSize(adaptor));
4133 Call(adaptor, RelocInfo::CODE_TARGET);
4134 call_wrapper.AfterCall();
4135 if (!*definitely_mismatches) {
4136 jmp(done, near_jump);
4137 }
4138 } else {
4139 Jump(adaptor, RelocInfo::CODE_TARGET);
4140 }
4141 bind(&invoke);
4142 }
4143 }
4144
4145
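// If the debugger has enabled step-in, calls the runtime to prepare the
// callee for stepping. Register-based parameter counts are smi-tagged and
// pushed around the runtime call so their values survive it.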
4146 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
4147 const ParameterCount& expected,
4148 const ParameterCount& actual) {
4149 Label skip_flooding;
4150 ExternalReference step_in_enabled =
4151 ExternalReference::debug_step_in_enabled_address(isolate());
4152 Operand step_in_enabled_operand = ExternalOperand(step_in_enabled);
4153 cmpb(step_in_enabled_operand, Immediate(0));
4154 j(equal, &skip_flooding);
4155 {
4156 FrameScope frame(this,
4157 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4158 if (expected.is_reg()) {
4159 Integer32ToSmi(expected.reg(), expected.reg());
4160 Push(expected.reg());
4161 }
4162 if (actual.is_reg()) {
4163 Integer32ToSmi(actual.reg(), actual.reg());
4164 Push(actual.reg());
4165 }
4166 if (new_target.is_valid()) {
4167 Push(new_target);
4168 }
4169 Push(fun);
4170 Push(fun);
4171 CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
4172 Pop(fun);
4173 if (new_target.is_valid()) {
4174 Pop(new_target);
4175 }
4176 if (actual.is_reg()) {
4177 Pop(actual.reg());
4178 SmiToInteger64(actual.reg(), actual.reg());
4179 }
4180 if (expected.is_reg()) {
4181 Pop(expected.reg());
4182 SmiToInteger64(expected.reg(), expected.reg());
4183 }
4184 }
4185 bind(&skip_flooding);
4186 }
4187
4188
4189 void MacroAssembler::StubPrologue() {
4190 pushq(rbp); // Caller's frame pointer.
4191 movp(rbp, rsp);
4192 Push(rsi); // Callee's context.
4193 Push(Smi::FromInt(StackFrame::STUB));
4194 }
4195
4196
4197 void MacroAssembler::Prologue(bool code_pre_aging) {
4198 PredictableCodeSizeScope predictable_code_size_scope(this,
4199 kNoCodeAgeSequenceLength);
4200 if (code_pre_aging) {
4201 // Pre-age the code.
4202 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
4203 RelocInfo::CODE_AGE_SEQUENCE);
4204 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
4205 } else {
4206 pushq(rbp); // Caller's frame pointer.
4207 movp(rbp, rsp);
4208 Push(rsi); // Callee's context.
4209 Push(rdi); // Callee's JS function.
4210 }
4211 }
4212
4213
4214 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
4215 movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
4216 movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
4217 movp(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
4218 }
4219
4220
4221 void MacroAssembler::EnterFrame(StackFrame::Type type,
4222 bool load_constant_pool_pointer_reg) {
4223 // Out-of-line constant pool not implemented on x64.
4224 UNREACHABLE();
4225 }
4226
4227
4228 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4229 pushq(rbp);
4230 movp(rbp, rsp);
4231 Push(rsi); // Context.
4232 Push(Smi::FromInt(type));
4233 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4234 Push(kScratchRegister);
4235 if (emit_debug_code()) {
4236 Move(kScratchRegister,
4237 isolate()->factory()->undefined_value(),
4238 RelocInfo::EMBEDDED_OBJECT);
4239 cmpp(Operand(rsp, 0), kScratchRegister);
4240 Check(not_equal, kCodeObjectNotProperlyPatched);
4241 }
4242 }
4243
4244
4245 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4246 if (emit_debug_code()) {
4247 Move(kScratchRegister, Smi::FromInt(type));
4248 cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
4249 Check(equal, kStackFrameTypesMustMatch);
4250 }
4251 movp(rsp, rbp);
4252 popq(rbp);
4253 }
4254
4255
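// Builds the fixed part of an exit frame: the caller's return address is
// already on the stack, rbp is pushed and set up as the frame pointer, a
// slot is reserved for the stack pointer (patched later in
// EnterExitFrameEpilogue), the code object is pushed, and the frame
// pointer, context and C function are recorded in the isolate's external
// references.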
4256 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
4257 // Set up the frame structure on the stack.
4258 // All constants are relative to the frame pointer of the exit frame.
4259 DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
4260 kFPOnStackSize + kPCOnStackSize);
4261 DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
4262 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
4263 pushq(rbp);
4264 movp(rbp, rsp);
4265
4266 // Reserve room for entry stack pointer and push the code object.
4267 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
4268 Push(Immediate(0)); // Saved entry sp, patched before call.
4269 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4270 Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
4271
4272 // Save the frame pointer and the context in top.
4273 if (save_rax) {
4274 movp(r14, rax); // Back up rax in a callee-saved register.
4275 }
4276
4277 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
4278 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
4279 Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
4280 }
4281
4282
4283 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
4284 bool save_doubles) {
4285 #ifdef _WIN64
4286 const int kShadowSpace = 4;
4287 arg_stack_space += kShadowSpace;
4288 #endif
4289 // Optionally save all XMM registers.
4290 if (save_doubles) {
4291 int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
4292 arg_stack_space * kRegisterSize;
4293 subp(rsp, Immediate(space));
4294 int offset = -2 * kPointerSize;
4295 const RegisterConfiguration* config =
4296 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
4297 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4298 DoubleRegister reg =
4299 DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
4300 Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
4301 }
4302 } else if (arg_stack_space > 0) {
4303 subp(rsp, Immediate(arg_stack_space * kRegisterSize));
4304 }
4305
4306 // Get the required frame alignment for the OS.
4307 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
4308 if (kFrameAlignment > 0) {
4309 DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
4310 DCHECK(is_int8(kFrameAlignment));
4311 andp(rsp, Immediate(-kFrameAlignment));
4312 }
4313
4314 // Patch the saved entry sp.
4315 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
4316 }
4317
4318
4319 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
4320 EnterExitFramePrologue(true);
4321
4322 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
4323 // so it must be retained across the C-call.
4324 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
4325 leap(r15, Operand(rbp, r14, times_pointer_size, offset));
4326
4327 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
4328 }
4329
4330
4331 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
4332 EnterExitFramePrologue(false);
4333 EnterExitFrameEpilogue(arg_stack_space, false);
4334 }
4335
4336
4337 void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
4338 // Registers:
4339 // r15 : argv
4340 if (save_doubles) {
4341 int offset = -2 * kPointerSize;
4342 const RegisterConfiguration* config =
4343 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
4344 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4345 DoubleRegister reg =
4346 DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
4347 Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
4348 }
4349 }
4350
4351 if (pop_arguments) {
4352 // Get the return address from the stack and restore the frame pointer.
4353 movp(rcx, Operand(rbp, kFPOnStackSize));
4354 movp(rbp, Operand(rbp, 0 * kPointerSize));
4355
4356 // Drop everything up to and including the arguments and the receiver
4357 // from the caller stack.
4358 leap(rsp, Operand(r15, 1 * kPointerSize));
4359
4360 PushReturnAddressFrom(rcx);
4361 } else {
4362 // Otherwise just leave the exit frame.
4363 leave();
4364 }
4365
4366 LeaveExitFrameEpilogue(true);
4367 }
4368
4369
4370 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
4371 movp(rsp, rbp);
4372 popq(rbp);
4373
4374 LeaveExitFrameEpilogue(restore_context);
4375 }
4376
4377
4378 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
4379 // Restore current context from top and clear it in debug mode.
4380 ExternalReference context_address(Isolate::kContextAddress, isolate());
4381 Operand context_operand = ExternalOperand(context_address);
4382 if (restore_context) {
4383 movp(rsi, context_operand);
4384 }
4385 #ifdef DEBUG
4386 movp(context_operand, Immediate(0));
4387 #endif
4388
4389 // Clear the top frame.
4390 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4391 isolate());
4392 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4393 movp(c_entry_fp_operand, Immediate(0));
4394 }
4395
4396
4397 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4398 Register scratch,
4399 Label* miss) {
4400 Label same_contexts;
4401
4402 DCHECK(!holder_reg.is(scratch));
4403 DCHECK(!scratch.is(kScratchRegister));
4404 // Load current lexical context from the stack frame.
4405 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4406
4407 // When generating debug code, make sure the lexical context is set.
4408 if (emit_debug_code()) {
4409 cmpp(scratch, Immediate(0));
4410 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4411 }
4412 // Load the native context of the current context.
4413 movp(scratch, ContextOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
4414
4415 // Check the context is a native context.
4416 if (emit_debug_code()) {
4417 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4418 isolate()->factory()->native_context_map());
4419 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4420 }
4421
4422 // Check if both contexts are the same.
4423 cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4424 j(equal, &same_contexts);
4425
4426 // Compare security tokens.
4427 // Check that the security token in the calling global object is
4428 // compatible with the security token in the receiving global
4429 // object.
4430
4431 // Check the context is a native context.
4432 if (emit_debug_code()) {
4433 // Preserve original value of holder_reg.
4434 Push(holder_reg);
4435 movp(holder_reg,
4436 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4437 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4438 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4439
4440 // Read the first word and compare to native_context_map().
4441 movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4442 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4443 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4444 Pop(holder_reg);
4445 }
4446
4447 movp(kScratchRegister,
4448 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4449 int token_offset =
4450 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4451 movp(scratch, FieldOperand(scratch, token_offset));
4452 cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4453 j(not_equal, miss);
4454
4455 bind(&same_contexts);
4456 }
4457
4458
4459 // Compute the hash code from the untagged key. This must be kept in sync with
4460 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
4461 // code-stubs-hydrogen.cc.
4462 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4463 // First of all we assign the hash seed to scratch.
4464 LoadRoot(scratch, Heap::kHashSeedRootIndex);
4465 SmiToInteger32(scratch, scratch);
4466
4467 // Xor original key with a seed.
4468 xorl(r0, scratch);
4469
4470 // Compute the hash code from the untagged key. This must be kept in sync
4471 // with ComputeIntegerHash in utils.h.
4472 //
4473 // hash = ~hash + (hash << 15);
4474 movl(scratch, r0);
4475 notl(r0);
4476 shll(scratch, Immediate(15));
4477 addl(r0, scratch);
4478 // hash = hash ^ (hash >> 12);
4479 movl(scratch, r0);
4480 shrl(scratch, Immediate(12));
4481 xorl(r0, scratch);
4482 // hash = hash + (hash << 2);
4483 leal(r0, Operand(r0, r0, times_4, 0));
4484 // hash = hash ^ (hash >> 4);
4485 movl(scratch, r0);
4486 shrl(scratch, Immediate(4));
4487 xorl(r0, scratch);
4488 // hash = hash * 2057;
4489 imull(r0, r0, Immediate(2057));
4490 // hash = hash ^ (hash >> 16);
4491 movl(scratch, r0);
4492 shrl(scratch, Immediate(16));
4493 xorl(r0, scratch);
4494 andl(r0, Immediate(0x3fffffff));
4495 }
4496
4497
4498
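// Probes a SeededNumberDictionary for an untagged integer key. The loop is
// unrolled into kNumberDictionaryProbes quadratic probes, indexing with
// (hash + i + i*i) & mask; a mismatch on the last probe jumps to 'miss',
// and a hit is only accepted if the entry's details mark it as a data
// property.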
4499 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4500 Register elements,
4501 Register key,
4502 Register r0,
4503 Register r1,
4504 Register r2,
4505 Register result) {
4506 // Register use:
4507 //
4508 // elements - holds the slow-case elements of the receiver on entry.
4509 // Unchanged unless 'result' is the same register.
4510 //
4511 // key - holds the smi key on entry.
4512 // Unchanged unless 'result' is the same register.
4513 //
4514 // Scratch registers:
4515 //
4516 // r0 - holds the untagged key on entry and holds the hash once computed.
4517 //
4518 // r1 - used to hold the capacity mask of the dictionary
4519 //
4520 // r2 - used for the index into the dictionary.
4521 //
4522 // result - holds the result on exit if the load succeeded.
4523 // Allowed to be the same as 'key' or 'elements'.
4524 // Unchanged on bailout so 'key' or 'elements' can be used
4525 // in further computation.
4526
4527 Label done;
4528
4529 GetNumberHash(r0, r1);
4530
4531 // Compute capacity mask.
4532 SmiToInteger32(r1, FieldOperand(elements,
4533 SeededNumberDictionary::kCapacityOffset));
4534 decl(r1);
4535
4536 // Generate an unrolled loop that performs a few probes before giving up.
4537 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4538 // Use r2 for index calculations and keep the hash intact in r0.
4539 movp(r2, r0);
4540 // Compute the masked index: (hash + i + i * i) & mask.
4541 if (i > 0) {
4542 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4543 }
4544 andp(r2, r1);
4545
4546 // Scale the index by multiplying by the entry size.
4547 DCHECK(SeededNumberDictionary::kEntrySize == 3);
4548 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
4549
4550 // Check if the key matches.
4551 cmpp(key, FieldOperand(elements,
4552 r2,
4553 times_pointer_size,
4554 SeededNumberDictionary::kElementsStartOffset));
4555 if (i != (kNumberDictionaryProbes - 1)) {
4556 j(equal, &done);
4557 } else {
4558 j(not_equal, miss);
4559 }
4560 }
4561
4562 bind(&done);
4563 // Check that the value is a field property.
4564 const int kDetailsOffset =
4565 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4566 DCHECK_EQ(DATA, 0);
4567 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4568 Smi::FromInt(PropertyDetails::TypeField::kMask));
4569 j(not_zero, miss);
4570
4571 // Get the value at the masked, scaled index.
4572 const int kValueOffset =
4573 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4574 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4575 }
4576
4577
4578 void MacroAssembler::LoadAllocationTopHelper(Register result,
4579 Register scratch,
4580 AllocationFlags flags) {
4581 ExternalReference allocation_top =
4582 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4583
4584 // Just return if allocation top is already known.
4585 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4586 // No use of scratch if allocation top is provided.
4587 DCHECK(!scratch.is_valid());
4588 #ifdef DEBUG
4589 // Assert that result actually contains top on entry.
4590 Operand top_operand = ExternalOperand(allocation_top);
4591 cmpp(result, top_operand);
4592 Check(equal, kUnexpectedAllocationTop);
4593 #endif
4594 return;
4595 }
4596
4597 // Move address of new object to result. Use scratch register if available,
4598 // and keep address in scratch until call to UpdateAllocationTopHelper.
4599 if (scratch.is_valid()) {
4600 LoadAddress(scratch, allocation_top);
4601 movp(result, Operand(scratch, 0));
4602 } else {
4603 Load(result, allocation_top);
4604 }
4605 }
4606
4607
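// Ensures the allocation pointer in 'result' is double aligned. On builds
// where pointers are already 8 bytes wide this is only a debug check;
// otherwise a one-pointer filler object is stored at the unaligned address
// and the pointer is bumped past it.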
4608 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4609 Register scratch,
4610 Label* gc_required,
4611 AllocationFlags flags) {
4612 if (kPointerSize == kDoubleSize) {
4613 if (FLAG_debug_code) {
4614 testl(result, Immediate(kDoubleAlignmentMask));
4615 Check(zero, kAllocationIsNotDoubleAligned);
4616 }
4617 } else {
4618 // Align the next allocation. Storing the filler map without checking top
4619 // is safe in new-space because the limit of the heap is aligned there.
4620 DCHECK(kPointerSize * 2 == kDoubleSize);
4621 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4622 // Make sure scratch is not clobbered by this function as it might be
4623 // used in UpdateAllocationTopHelper later.
4624 DCHECK(!scratch.is(kScratchRegister));
4625 Label aligned;
4626 testl(result, Immediate(kDoubleAlignmentMask));
4627 j(zero, &aligned, Label::kNear);
4628 if ((flags & PRETENURE) != 0) {
4629 ExternalReference allocation_limit =
4630 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4631 cmpp(result, ExternalOperand(allocation_limit));
4632 j(above_equal, gc_required);
4633 }
4634 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4635 movp(Operand(result, 0), kScratchRegister);
4636 addp(result, Immediate(kDoubleSize / 2));
4637 bind(&aligned);
4638 }
4639 }
4640
4641
4642 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4643 Register scratch,
4644 AllocationFlags flags) {
4645 if (emit_debug_code()) {
4646 testp(result_end, Immediate(kObjectAlignmentMask));
4647 Check(zero, kUnalignedAllocationInNewSpace);
4648 }
4649
4650 ExternalReference allocation_top =
4651 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4652
4653 // Update new top.
4654 if (scratch.is_valid()) {
4655 // Scratch already contains address of allocation top.
4656 movp(Operand(scratch, 0), result_end);
4657 } else {
4658 Store(allocation_top, result_end);
4659 }
4660 }
4661
4662
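// Bump-pointer allocation of 'object_size' bytes in the space selected by
// 'flags': load the current allocation top, add the size, compare against
// the allocation limit and jump to 'gc_required' on failure, then write
// back the new top and optionally tag the result as a heap object.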
4663 void MacroAssembler::Allocate(int object_size,
4664 Register result,
4665 Register result_end,
4666 Register scratch,
4667 Label* gc_required,
4668 AllocationFlags flags) {
4669 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4670 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4671 if (!FLAG_inline_new) {
4672 if (emit_debug_code()) {
4673 // Trash the registers to simulate an allocation failure.
4674 movl(result, Immediate(0x7091));
4675 if (result_end.is_valid()) {
4676 movl(result_end, Immediate(0x7191));
4677 }
4678 if (scratch.is_valid()) {
4679 movl(scratch, Immediate(0x7291));
4680 }
4681 }
4682 jmp(gc_required);
4683 return;
4684 }
4685 DCHECK(!result.is(result_end));
4686
4687 // Load address of new object into result.
4688 LoadAllocationTopHelper(result, scratch, flags);
4689
4690 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4691 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4692 }
4693
4694 // Calculate new top and bail out if new space is exhausted.
4695 ExternalReference allocation_limit =
4696 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4697
4698 Register top_reg = result_end.is_valid() ? result_end : result;
4699
4700 if (!top_reg.is(result)) {
4701 movp(top_reg, result);
4702 }
4703 addp(top_reg, Immediate(object_size));
4704 j(carry, gc_required);
4705 Operand limit_operand = ExternalOperand(allocation_limit);
4706 cmpp(top_reg, limit_operand);
4707 j(above, gc_required);
4708
4709 // Update allocation top.
4710 UpdateAllocationTopHelper(top_reg, scratch, flags);
4711
4712 bool tag_result = (flags & TAG_OBJECT) != 0;
4713 if (top_reg.is(result)) {
4714 if (tag_result) {
4715 subp(result, Immediate(object_size - kHeapObjectTag));
4716 } else {
4717 subp(result, Immediate(object_size));
4718 }
4719 } else if (tag_result) {
4720 // Tag the result if requested.
4721 DCHECK(kHeapObjectTag == 1);
4722 incp(result);
4723 }
4724 }
4725
4726
4727 void MacroAssembler::Allocate(int header_size,
4728 ScaleFactor element_size,
4729 Register element_count,
4730 Register result,
4731 Register result_end,
4732 Register scratch,
4733 Label* gc_required,
4734 AllocationFlags flags) {
4735 DCHECK((flags & SIZE_IN_WORDS) == 0);
4736 leap(result_end, Operand(element_count, element_size, header_size));
4737 Allocate(result_end, result, result_end, scratch, gc_required, flags);
4738 }
4739
4740
4741 void MacroAssembler::Allocate(Register object_size,
4742 Register result,
4743 Register result_end,
4744 Register scratch,
4745 Label* gc_required,
4746 AllocationFlags flags) {
4747 DCHECK((flags & SIZE_IN_WORDS) == 0);
4748 if (!FLAG_inline_new) {
4749 if (emit_debug_code()) {
4750 // Trash the registers to simulate an allocation failure.
4751 movl(result, Immediate(0x7091));
4752 movl(result_end, Immediate(0x7191));
4753 if (scratch.is_valid()) {
4754 movl(scratch, Immediate(0x7291));
4755 }
4756 // object_size is left unchanged by this function.
4757 }
4758 jmp(gc_required);
4759 return;
4760 }
4761 DCHECK(!result.is(result_end));
4762
4763 // Load address of new object into result.
4764 LoadAllocationTopHelper(result, scratch, flags);
4765
4766 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4767 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4768 }
4769
4770 // Calculate new top and bail out if new space is exhausted.
4771 ExternalReference allocation_limit =
4772 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4773 if (!object_size.is(result_end)) {
4774 movp(result_end, object_size);
4775 }
4776 addp(result_end, result);
4777 j(carry, gc_required);
4778 Operand limit_operand = ExternalOperand(allocation_limit);
4779 cmpp(result_end, limit_operand);
4780 j(above, gc_required);
4781
4782 // Update allocation top.
4783 UpdateAllocationTopHelper(result_end, scratch, flags);
4784
4785 // Tag the result if requested.
4786 if ((flags & TAG_OBJECT) != 0) {
4787 addp(result, Immediate(kHeapObjectTag));
4788 }
4789 }
4790
4791
4792 void MacroAssembler::AllocateHeapNumber(Register result,
4793 Register scratch,
4794 Label* gc_required,
4795 MutableMode mode) {
4796 // Allocate heap number in new space.
4797 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4798
4799 Heap::RootListIndex map_index = mode == MUTABLE
4800 ? Heap::kMutableHeapNumberMapRootIndex
4801 : Heap::kHeapNumberMapRootIndex;
4802
4803 // Set the map.
4804 LoadRoot(kScratchRegister, map_index);
4805 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4806 }
4807
4808
4809 void MacroAssembler::AllocateTwoByteString(Register result,
4810 Register length,
4811 Register scratch1,
4812 Register scratch2,
4813 Register scratch3,
4814 Label* gc_required) {
4815 // Calculate the number of bytes needed for the characters in the string while
4816 // observing object alignment.
4817 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4818 kObjectAlignmentMask;
4819 DCHECK(kShortSize == 2);
4820 // scratch1 = length * 2 + kObjectAlignmentMask.
4821 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4822 kHeaderAlignment));
4823 andp(scratch1, Immediate(~kObjectAlignmentMask));
4824 if (kHeaderAlignment > 0) {
4825 subp(scratch1, Immediate(kHeaderAlignment));
4826 }
4827
4828 // Allocate two byte string in new space.
4829 Allocate(SeqTwoByteString::kHeaderSize,
4830 times_1,
4831 scratch1,
4832 result,
4833 scratch2,
4834 scratch3,
4835 gc_required,
4836 TAG_OBJECT);
4837
4838 // Set the map, length and hash field.
4839 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4840 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4841 Integer32ToSmi(scratch1, length);
4842 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4843 movp(FieldOperand(result, String::kHashFieldOffset),
4844 Immediate(String::kEmptyHashField));
4845 }
4846
4847
4848 void MacroAssembler::AllocateOneByteString(Register result, Register length,
4849 Register scratch1, Register scratch2,
4850 Register scratch3,
4851 Label* gc_required) {
4852 // Calculate the number of bytes needed for the characters in the string while
4853 // observing object alignment.
4854 const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4855 kObjectAlignmentMask;
4856 movl(scratch1, length);
4857 DCHECK(kCharSize == 1);
4858 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4859 andp(scratch1, Immediate(~kObjectAlignmentMask));
4860 if (kHeaderAlignment > 0) {
4861 subp(scratch1, Immediate(kHeaderAlignment));
4862 }
4863
4864 // Allocate one-byte string in new space.
4865 Allocate(SeqOneByteString::kHeaderSize,
4866 times_1,
4867 scratch1,
4868 result,
4869 scratch2,
4870 scratch3,
4871 gc_required,
4872 TAG_OBJECT);
4873
4874 // Set the map, length and hash field.
4875 LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
4876 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4877 Integer32ToSmi(scratch1, length);
4878 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4879 movp(FieldOperand(result, String::kHashFieldOffset),
4880 Immediate(String::kEmptyHashField));
4881 }
4882
4883
4884 void MacroAssembler::AllocateTwoByteConsString(Register result,
4885 Register scratch1,
4886 Register scratch2,
4887 Label* gc_required) {
4888 // Allocate cons string in new space.
4889 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4890 TAG_OBJECT);
4891
4892 // Set the map. The other fields are left uninitialized.
4893 LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4894 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4895 }
4896
4897
4898 void MacroAssembler::AllocateOneByteConsString(Register result,
4899 Register scratch1,
4900 Register scratch2,
4901 Label* gc_required) {
4902 Allocate(ConsString::kSize,
4903 result,
4904 scratch1,
4905 scratch2,
4906 gc_required,
4907 TAG_OBJECT);
4908
4909 // Set the map. The other fields are left uninitialized.
4910 LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
4911 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4912 }
4913
4914
4915 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4916 Register scratch1,
4917 Register scratch2,
4918 Label* gc_required) {
4919 // Allocate sliced string in new space.
4920 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4921 TAG_OBJECT);
4922
4923 // Set the map. The other fields are left uninitialized.
4924 LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4925 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4926 }
4927
4928
4929 void MacroAssembler::AllocateOneByteSlicedString(Register result,
4930 Register scratch1,
4931 Register scratch2,
4932 Label* gc_required) {
4933 // Allocate sliced string in new space.
4934 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4935 TAG_OBJECT);
4936
4937 // Set the map. The other fields are left uninitialized.
4938 LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
4939 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4940 }
4941
4942
4943 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4944 Register value, Register scratch,
4945 Label* gc_required) {
4946 DCHECK(!result.is(constructor));
4947 DCHECK(!result.is(scratch));
4948 DCHECK(!result.is(value));
4949
4950 // Allocate JSValue in new space.
4951 Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4952
4953 // Initialize the JSValue.
4954 LoadGlobalFunctionInitialMap(constructor, scratch);
4955 movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
4956 LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4957 movp(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
4958 movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
4959 movp(FieldOperand(result, JSValue::kValueOffset), value);
4960 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
4961 }
4962
4963
4964 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4965 // long or aligned copies. The contents of scratch and length are destroyed.
4966 // Destination is incremented by length; source, length and scratch are
4967 // clobbered.
4968 // A simpler loop is faster on small copies, but slower on large ones.
4969 // The cld() instruction must have been emitted, to clear the direction flag,
4970 // before calling this function.
4971 void MacroAssembler::CopyBytes(Register destination,
4972 Register source,
4973 Register length,
4974 int min_length,
4975 Register scratch) {
4976 DCHECK(min_length >= 0);
4977 if (emit_debug_code()) {
4978 cmpl(length, Immediate(min_length));
4979 Assert(greater_equal, kInvalidMinLength);
4980 }
4981 Label short_loop, len8, len16, len24, done, short_string;
4982
4983 const int kLongStringLimit = 4 * kPointerSize;
4984 if (min_length <= kLongStringLimit) {
4985 cmpl(length, Immediate(kPointerSize));
4986 j(below, &short_string, Label::kNear);
4987 }
4988
4989 DCHECK(source.is(rsi));
4990 DCHECK(destination.is(rdi));
4991 DCHECK(length.is(rcx));
4992
4993 if (min_length <= kLongStringLimit) {
4994 cmpl(length, Immediate(2 * kPointerSize));
4995 j(below_equal, &len8, Label::kNear);
4996 cmpl(length, Immediate(3 * kPointerSize));
4997 j(below_equal, &len16, Label::kNear);
4998 cmpl(length, Immediate(4 * kPointerSize));
4999 j(below_equal, &len24, Label::kNear);
5000 }
5001
5002 // Because source is 8-byte aligned in our uses of this function,
5003 // we keep source aligned for the rep movs operation by copying the odd bytes
5004 // at the end of the ranges.
5005 movp(scratch, length);
5006 shrl(length, Immediate(kPointerSizeLog2));
5007 repmovsp();
5008 // Move remaining bytes of length.
5009 andl(scratch, Immediate(kPointerSize - 1));
5010 movp(length, Operand(source, scratch, times_1, -kPointerSize));
5011 movp(Operand(destination, scratch, times_1, -kPointerSize), length);
5012 addp(destination, scratch);
5013
5014 if (min_length <= kLongStringLimit) {
5015 jmp(&done, Label::kNear);
5016 bind(&len24);
5017 movp(scratch, Operand(source, 2 * kPointerSize));
5018 movp(Operand(destination, 2 * kPointerSize), scratch);
5019 bind(&len16);
5020 movp(scratch, Operand(source, kPointerSize));
5021 movp(Operand(destination, kPointerSize), scratch);
5022 bind(&len8);
5023 movp(scratch, Operand(source, 0));
5024 movp(Operand(destination, 0), scratch);
5025 // Move remaining bytes of length.
5026 movp(scratch, Operand(source, length, times_1, -kPointerSize));
5027 movp(Operand(destination, length, times_1, -kPointerSize), scratch);
5028 addp(destination, length);
5029 jmp(&done, Label::kNear);
5030
5031 bind(&short_string);
5032 if (min_length == 0) {
5033 testl(length, length);
5034 j(zero, &done, Label::kNear);
5035 }
5036
5037 bind(&short_loop);
5038 movb(scratch, Operand(source, 0));
5039 movb(Operand(destination, 0), scratch);
5040 incp(source);
5041 incp(destination);
5042 decl(length);
5043 j(not_zero, &short_loop);
5044 }
5045
5046 bind(&done);
5047 }
5048
5049
5050 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
5051 Register end_address,
5052 Register filler) {
5053 Label loop, entry;
5054 jmp(&entry);
5055 bind(&loop);
5056 movp(Operand(current_address, 0), filler);
5057 addp(current_address, Immediate(kPointerSize));
5058 bind(&entry);
5059 cmpp(current_address, end_address);
5060 j(below, &loop);
5061 }
5062
5063
5064 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5065 if (context_chain_length > 0) {
5066 // Move up the chain of contexts to the context containing the slot.
5067 movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5068 for (int i = 1; i < context_chain_length; i++) {
5069 movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5070 }
5071 } else {
5072 // Slot is in the current function context. Move it into the
5073 // destination register in case we store into it (the write barrier
5074 // cannot be allowed to destroy the context in rsi).
5075 movp(dst, rsi);
5076 }
5077
5078 // We should not have found a with context by walking the context
5079 // chain (i.e., the static scope chain and runtime context chain do
5080 // not agree). A variable occurring in such a scope should have
5081 // slot type LOOKUP and not CONTEXT.
5082 if (emit_debug_code()) {
5083 CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
5084 Heap::kWithContextMapRootIndex);
5085 Check(not_equal, kVariableResolvedToWithContext);
5086 }
5087 }
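
// For example (rbx chosen arbitrarily), LoadContext(rbx, 2) emits two loads
// of the PREVIOUS_INDEX slot, leaving in rbx the context two levels up from
// the current one, while LoadContext(rbx, 0) simply copies rsi so that a
// later store (and its write barrier) can never clobber the context register
// itself.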


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  movp(scratch, NativeContextOperand());
  cmpp(map_in_out,
       ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  movp(map_in_out,
       ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}


#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif


void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  movp(dst, NativeContextOperand());
  movp(dst, ContextOperand(dst, index));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map. The global functions all have initial maps.
  movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64, stack slots are reserved by the caller for all arguments,
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // With the AMD64 ABI (Linux/Mac), the first six arguments are passed in
  // registers and the caller does not reserve stack slots for them.
  DCHECK(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}
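
// Illustrative values (they simply restate the rules above):
//   ArgumentStackSlotsForCFunctionCall(3) -> 4 on Windows x64, 0 elsewhere
//   ArgumentStackSlotsForCFunctionCall(7) -> 7 on Windows x64, 1 elsewhere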


void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object);
  Abort(kNonObject);
  bind(&is_object);

  Push(value);
  movp(value, FieldOperand(string, HeapObject::kMapOffset));
  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));

  andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmpp(value, Immediate(encoding_mask));
  Pop(value);
  Check(equal, kUnexpectedStringType);

  // The index is assumed to be untagged coming in; tag it to compare with the
  // string length without using a temp register. It is restored at the end of
  // this function.
  Integer32ToSmi(index, index);
  SmiCompare(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  SmiCompare(index, Smi::FromInt(0));
  Check(greater_equal, kIndexIsNegative);

  // Restore the index.
  SmiToInteger32(index, index);
}


void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  DCHECK(frame_alignment != 0);
  DCHECK(num_arguments >= 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movp(kScratchRegister, rsp);
  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
  andp(rsp, Immediate(-frame_alignment));
  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
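
// Worked example (System V ABI, frame_alignment == 16, num_arguments == 6):
// argument_slots_on_stack == 0, so one extra slot is reserved for the saved
// rsp, rsp is rounded down to a 16-byte boundary, and the original rsp is
// stored in the highest reserved slot ([rsp + 0] in this case).
// CallCFunction below reloads it from that same slot, undoing both the
// reservation and the alignment with a single move.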


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  DCHECK(base::OS::ActivationFrameAlignment() != 0);
  DCHECK(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
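
// Typical calling sequence (a sketch; the external reference name is
// hypothetical, and the arg_reg_* aliases are assumed to be the ones declared
// in macro-assembler-x64.h):
//
//   PrepareCallCFunction(2);
//   LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
//   Move(arg_reg_2, Smi::FromInt(0));
//   CallCFunction(ExternalReference::some_runtime_helper(isolate()), 2);
//
// PrepareCallCFunction and CallCFunction must be paired: the latter relies on
// the saved rsp slot written by the former.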


#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
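
// Example: AreAliased(rax, rbx, rax, no_reg, no_reg, no_reg, no_reg, no_reg)
// counts three valid registers but only two distinct bits in |regs|, so it
// returns true.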
#endif


CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
    : address_(address),
      size_(size),
      masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  Assembler::FlushICache(masm_.isolate(), address_, size_);

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
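
// Typical use (a sketch; the patched instruction is illustrative):
//
//   {
//     CodePatcher patcher(isolate, pc, 1);
//     patcher.masm()->int3();  // Overwrite one byte at pc.
//   }  // The destructor flushes the instruction cache for [pc, pc + 1).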


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    andp(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movp(scratch, Immediate(~Page::kPageAlignmentMask));
    andp(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 1 at the position of the second bit. All other positions are zero.
  movp(rcx, mask_scratch);
  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpp(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  movp(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movp(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  andp(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addp(bitmap_reg, rcx);
  movp(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(3));
  shlp_cl(mask_reg);
}
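
// Worked example (64-bit constants: kPointerSizeLog2 == 3,
// Bitmap::kBitsPerCellLog2 == 5, Bitmap::kBytesPerCellLog2 == 2; a 1 MB page
// size is assumed purely for concreteness):
//   addr = page_start + 0x1234
//   bitmap_reg = page_start                     (addr & ~kPageAlignmentMask)
//   shift = 5 + 3 - 2 == 6, so the cell byte offset added to bitmap_reg is
//     (0x1234 >> 6) & ~3 == 0x48
//   bit index = (0x1234 >> 3) & 31 == 6, so mask_reg == 3 << 6 == 0xC0,
//     i.e. both mark bits of the object, ready for JumpIfBlack/JumpIfWhite.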


void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register mask_scratch, Label* value_is_white,
                                 Label::Distance distance) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey, we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there, we only need to check one bit.
  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(zero, value_is_white, distance);
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movp(rcx, rax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(0));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  cmpp(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(equal, &no_elements);

  // Second chance: the object may be using the empty slow element dictionary.
  LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
  cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  bind(&no_elements);
  movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  cmpp(rcx, null_value);
  j(not_equal, &next);
}

void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  leap(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Move(kScratchRegister, new_space_start);
  cmpp(scratch_reg, kScratchRegister);
  j(less, no_memento_found);
  cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater, no_memento_found);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
}
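
// Layout assumed above: a memento, when present, sits directly after the
// JSArray header, so scratch_reg ends up holding the address one past the
// would-be memento. That address must lie inside new space (between
// new_space_start and the allocation top) before the map word at
// scratch_reg - AllocationMemento::kSize is compared against the allocation
// memento map; callers then branch on the resulting flags.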


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
  DCHECK(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again, end;

  movp(current, object);
  movp(current, FieldOperand(current, HeapObject::kMapOffset));
  movp(current, FieldOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  j(equal, &end);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  movp(current, FieldOperand(current, HeapObject::kMapOffset));
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  CmpInstanceType(current, JS_OBJECT_TYPE);
  j(below, found);
  movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  movp(current, FieldOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  j(not_equal, &loop_again);

  bind(&end);
}


void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(rax));
  DCHECK(!dividend.is(rdx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  movl(rax, Immediate(mag.multiplier));
  imull(dividend);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) addl(rdx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
  if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
  movl(rax, dividend);
  shrl(rax, Immediate(31));
  addl(rdx, rax);
}
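
// Reference sketch of the same computation in plain C++, assuming a divisor
// of 3, for which SignedDivisionByConstant yields multiplier 0x55555556 and
// shift 0 (the helper name is hypothetical and not part of this file):
//
//   int32_t TruncatingDivBy3(int32_t dividend) {
//     int64_t product = static_cast<int64_t>(0x55555556) * dividend;
//     int32_t quotient = static_cast<int32_t>(product >> 32);  // rdx.
//     // The multiplier is below 2^31, so no corrective add/sub is needed.
//     quotient += static_cast<uint32_t>(dividend) >> 31;  // Fix negatives.
//     return quotient;                                    // dividend / 3.
//   }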


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64