// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/serialize.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      root_array_available_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


static const int64_t kInvalidRootRegisterDelta = -1;


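// Computes the displacement from kRootRegister to |other|, or
// kInvalidRootRegisterDelta if the reference should not be reached relative
// to the root array (for example, when code must have a predictable size
// and the target lies outside the isolate). kRootRegister points
// kRootRegisterBias bytes past the start of the root array, so nearby
// external references can be addressed with a register-relative operand
// instead of a 64-bit immediate.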
int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
  if (predictable_code_size() &&
      (other.address() < reinterpret_cast<Address>(isolate()) ||
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  }
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());

  int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
  if (kPointerSize == kInt64Size) {
    delta = other.address() - roots_register_value;
  } else {
    // For x32, zero extend the address to 64-bit and calculate the delta.
    uint64_t o = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(other.address()));
    uint64_t r = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(roots_register_value));
    delta = o - r;
  }
  return delta;
}


Operand MacroAssembler::ExternalOperand(ExternalReference target,
                                        Register scratch) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(target);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  Move(scratch, target);
  return Operand(scratch, 0);
}


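// Note on the rax special cases below: moves between rax and a 64-bit
// absolute address have a dedicated short encoding (used by load_rax and
// store_rax), so Load and Store avoid the scratch-register detour when the
// destination or source is rax.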
void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination.is(rax)) {
    load_rax(source);
  } else {
    Move(kScratchRegister, source);
    movp(destination, Operand(kScratchRegister, 0));
  }
}


void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(destination);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source.is(rax)) {
    store_rax(destination);
  } else {
    Move(kScratchRegister, destination);
    movp(Operand(kScratchRegister, 0), source);
  }
}


void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  Move(destination, source);
}


int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      // Operand is leap(scratch, Operand(kRootRegister, delta));
      // Opcodes : REX.W 8D ModRM Disp8/Disp32  - 4 or 7.
      int size = 4;
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need full four-byte displacement in lea.
      }
      return size;
    }
  }
  // Size of movp(destination, src);
  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}


void MacroAssembler::PushAddress(ExternalReference source) {
  int64_t address = reinterpret_cast<int64_t>(source.address());
  if (is_int32(address) && !serializer_enabled()) {
    if (emit_debug_code()) {
      Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
    }
    Push(Immediate(static_cast<int32_t>(address)));
    return;
  }
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);
}


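// The root-list accessors below compute each root's address relative to
// kRootRegister. The kPointerSizeLog2 scaling turns a root-list index into
// a byte offset, and kRootRegisterBias is subtracted because the register
// itself points that many bytes past the start of the root array.
// Usage sketch: LoadRoot(rax, Heap::kUndefinedValueRootIndex) is a single
// register-relative movp.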
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  movp(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
                                     int fixed_offset) {
  DCHECK(root_array_available_);
  movp(destination,
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
       source);
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  cmpp(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  DCHECK(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpp(with, kScratchRegister);
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register addr,
                                         Register scratch,
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movp(Operand(scratch, 0), addr);
  // Increment buffer top.
  addp(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  Label done;
  // Check for end of buffer.
  testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}


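// Checks whether |object| is in new space by masking its address and
// comparing the result against the new-space start. When serializing, the
// mask and start cannot be baked in as immediates (the snapshot may run
// with a differently sized new space), so they are loaded as external
// references instead.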
void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch,
                                Label::Distance distance) {
  if (serializer_enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address.  We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      andp(scratch, kScratchRegister);
    } else {
      Move(scratch, ExternalReference::new_space_mask(isolate()));
      andp(scratch, object);
    }
    Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpp(scratch, kScratchRegister);
    j(cc, branch, distance);
  } else {
    DCHECK(kPointerSize == kInt64Size
        ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
        : kPointerSize == kInt32Size);
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
    Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
         Assembler::RelocInfoNone());
    if (scratch.is(object)) {
      addp(scratch, kScratchRegister);
    } else {
      leap(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    andp(scratch,
         Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
    j(cc, branch, distance);
  }
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  leap(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(dst, kZapValue, Assembler::RelocInfoNone());
  }
}


void MacroAssembler::RecordWriteArray(
    Register object,
    Register value,
    Register index,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Array access: calculate the destination address. Index is not a smi.
  Register dst = index;
  leap(dst, Operand(object, index, times_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(index, kZapValue, Assembler::RelocInfoNone());
  }
}


void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       SaveFPRegsMode fp_mode) {
  DCHECK(!object.is(kScratchRegister));
  DCHECK(!object.is(map));
  DCHECK(!object.is(dst));
  DCHECK(!map.is(dst));
  AssertNotSmi(object);

  if (emit_debug_code()) {
    Label ok;
    if (map.is(kScratchRegister)) pushq(map);
    CompareMap(map, isolate()->factory()->meta_map());
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    if (map.is(kScratchRegister)) pushq(map);
    cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // Compute the address.
  leap(dst, FieldOperand(object, HeapObject::kMapOffset));

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  // A single check of the map's page's interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(dst, kZapValue, Assembler::RelocInfoNone());
    Move(map, kZapValue, Assembler::RelocInfoNone());
  }
}


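// Records a write of |value| to the slot at |address| inside |object|.
// The checks below filter out writes that can never need a barrier: smi
// values, values on pages where incoming pointers are not interesting, and
// objects on pages where outgoing pointers are not interesting. Only the
// remaining writes fall through to the RecordWriteStub call.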
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmpp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  zero,
                  &done,
                  Label::kNear);
  }

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(address, kZapValue, Assembler::RelocInfoNone());
    Move(value, kZapValue, Assembler::RelocInfoNone());
  }
}


void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    Label alignment_as_expected;
    testp(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok, Label::kNear);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
       Assembler::RelocInfoNone());
  Push(kScratchRegister);

  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // Control will not return here.
  int3();
}


void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  DCHECK(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  if (!hash.is(index)) {
    movl(index, hash);
  }
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(isolate(), f->result_size, save_doubles);
  CallStub(&ces);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


static int Offset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  // Check that the offset fits into an int.
  DCHECK(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}


void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
  EnterApiExitFrame(arg_stack_space);
}


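// Calls the API function at |function_address| and returns to generated
// code. Opens a HandleScope level around the call, routes the call through
// the profiling thunk when the profiler is active, propagates any
// scheduled exception, and leaves the API exit frame before returning.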
void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    Register thunk_last_arg,
    int stack_space,
    Operand return_value_operand,
    Operand* context_restore_operand) {
  Label prologue;
  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label write_back;

  Factory* factory = isolate()->factory();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(isolate()),
      next_address);
  const int kLevelOffset = Offset(
      ExternalReference::handle_scope_level_address(isolate()),
      next_address);
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());

  DCHECK(rdx.is(function_address) || r8.is(function_address));
  // Allocate HandleScope in callee-save registers.
  Register prev_next_address_reg = r14;
  Register prev_limit_reg = rbx;
  Register base_reg = r15;
  Move(base_reg, next_address);
  movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
  movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
  addl(Operand(base_reg, kLevelOffset), Immediate(1));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }


  Label profiler_disabled;
  Label end_profiler_check;
  Move(rax, ExternalReference::is_profiling_address(isolate()));
  cmpb(Operand(rax, 0), Immediate(0));
  j(zero, &profiler_disabled);

  // Third parameter is the address of the actual getter function.
  Move(thunk_last_arg, function_address);
  Move(rax, thunk_ref);
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  // Call the api function directly, not through the profiler thunk.
  Move(rax, function_address);

  bind(&end_profiler_check);

  // Call the api function!
  call(rax);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  // Load the value from ReturnValue
  movp(rax, return_value_operand);
  bind(&prologue);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  subl(Operand(base_reg, kLevelOffset), Immediate(1));
  movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
  cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  Move(rsi, scheduled_exception_address);
  Cmp(Operand(rsi, 0), factory->the_hole_value());
  j(not_equal, &promote_scheduled_exception);
  bind(&exception_handled);

#if ENABLE_EXTRA_CHECKS
  // Check if the function returned a valid JavaScript value.
  Label ok;
  Register return_value = rax;
  Register map = rcx;

  JumpIfSmi(return_value, &ok, Label::kNear);
  movp(map, FieldOperand(return_value, HeapObject::kMapOffset));

  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
  j(below, &ok, Label::kNear);

  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
  j(above_equal, &ok, Label::kNear);

  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kTrueValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kFalseValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kNullValueRootIndex);
  j(equal, &ok, Label::kNear);

  Abort(kAPICallReturnedInvalidObject);

  bind(&ok);
#endif

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    movp(rsi, *context_restore_operand);
  }
  LeaveApiExitFrame(!restore_context);
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallRuntime(Runtime::kPromoteScheduledException, 0);
  }
  jmp(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
  movp(prev_limit_reg, rax);
  LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
  LoadAddress(rax,
              ExternalReference::delete_handle_scope_extensions(isolate()));
  call(rax);
  movp(rax, prev_limit_reg);
  jmp(&leave_exit_frame);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(isolate(), result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movp(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  DCHECK(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}


#define REG(Name) { kRegister_ ## Name ## _Code }

static const Register saved_regs[] = {
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
  REG(r9), REG(r10), REG(r11)
};

#undef REG

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);


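// Saves the registers listed in saved_regs (and optionally all XMM
// registers) around a call, skipping up to three excluded registers;
// r12-r15 never appear in the list because they are callee-saved on all
// supported platforms.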
void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion1,
                                     Register exclusion2,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      pushq(reg);
    }
  }
  // R12 to r15 are callee save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(rsp, i * kDoubleSize), reg);
    }
  }
}


void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion1,
                                    Register exclusion2,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
  }
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      popq(reg);
    }
  }
}


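// cvtsi2sd only overwrites the low half of its destination register, so the
// register is cleared first; this breaks the false dependency on the
// destination's previous contents.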
void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  xorps(dst, dst);
  cvtlsi2sd(dst, src);
}


void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
  xorps(dst, dst);
  cvtlsi2sd(dst, src);
}


void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    movsxbq(dst, src);
  } else if (r.IsUInteger8()) {
    movzxbl(dst, src);
  } else if (r.IsInteger16()) {
    movsxwq(dst, src);
  } else if (r.IsUInteger16()) {
    movzxwl(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    movp(dst, src);
  }
}


void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    movb(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    movw(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    movp(dst, src);
  }
}


void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x);
  }
}


void MacroAssembler::Set(const Operand& dst, intptr_t x) {
  if (kPointerSize == kInt64Size) {
    if (is_int32(x)) {
      movp(dst, Immediate(static_cast<int32_t>(x)));
    } else {
      Set(kScratchRegister, x);
      movp(dst, kScratchRegister);
    }
  } else {
    movp(dst, Immediate(static_cast<int32_t>(x)));
  }
}


// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
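// With 32-bit smis (the usual 64-bit configuration), the payload lives in
// the upper 32 bits of the word and kSmiShift is 32; with 31-bit smis the
// payload sits above a single tag bit and kSmiShift is 1. In both layouts
// the smi tag (kSmiTag == 0) occupies the low bit.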

bool MacroAssembler::IsUnsafeInt(const int32_t x) {
  static const int kMaxBits = 17;
  return !is_intn(x, kMaxBits);
}


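// SafeMove and SafePush XOR large user-controllable smi immediates with the
// JIT cookie so the raw constant never appears verbatim in the instruction
// stream, making it harder to smuggle attacker-chosen bytes into
// executable memory.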
void MacroAssembler::SafeMove(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(dst, kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      movp(dst, Immediate(value ^ jit_cookie()));
      xorp(dst, Immediate(jit_cookie()));
    }
  } else {
    Move(dst, src);
  }
}


void MacroAssembler::SafePush(Smi* src) {
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Push(Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(Operand(rsp, 0), kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      Push(Immediate(value ^ jit_cookie()));
      xorp(Operand(rsp, 0), Immediate(jit_cookie()));
    }
  } else {
    Push(src);
  }
}


Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  if (value == 1) {
    return kSmiConstantRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}


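// Materializes a smi constant without a 64-bit immediate where possible:
// kSmiConstantRegister permanently holds Smi::FromInt(1), so small values
// are synthesized from it with a single lea (e.g. 5 becomes
// base + index * 4 with both operands kSmiConstantRegister), negating
// afterwards for negative values; other values fall back to a full move.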
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (emit_debug_code()) {
    Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
         Assembler::RelocInfoNone());
    cmpp(dst, kSmiConstantRegister);
    Assert(equal, kUninitializedKSmiConstantRegister);
  }
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
    return;
  }
  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;

  switch (uvalue) {
    case 9:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      break;
    case 8:
      xorl(dst, dst);
      leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      break;
    case 4:
      xorl(dst, dst);
      leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
      break;
    case 5:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
      break;
    case 3:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
      break;
    case 2:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      break;
    case 1:
      movp(dst, kSmiConstantRegister);
      break;
    case 0:
      UNREACHABLE();
      return;
    default:
      Move(dst, source, Assembler::RelocInfoNone());
      return;
  }
  if (negative) {
    negp(dst);
  }
}


void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shlp(dst, Immediate(kSmiShift));
}


void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
    bind(&ok);
  }

  if (SmiValuesAre32Bits()) {
    DCHECK(kSmiShift % kBitsPerByte == 0);
    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    Integer32ToSmi(kScratchRegister, src);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    leal(dst, Operand(src, constant));
  }
  shlp(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }

  if (SmiValuesAre32Bits()) {
    shrp(dst, Immediate(kSmiShift));
  } else {
    DCHECK(SmiValuesAre31Bits());
    sarl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(dst, src);
    sarl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }
  sarp(dst, Immediate(kSmiShift));
  if (kPointerSize == kInt32Size) {
    // Sign extend to 64-bit.
    movsxlq(dst, dst);
  }
}


void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movp(dst, src);
    SmiToInteger64(dst, dst);
  }
}


void MacroAssembler::SmiTest(Register src) {
  AssertSmi(src);
  testp(src, src);
}


void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmpp(smi1, smi2);
}


void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  AssertSmi(dst);
  Cmp(dst, src);
}


void MacroAssembler::Cmp(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testp(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpp(dst, constant_reg);
  }
}


void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  AssertSmi(dst);
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    cmpl(dst, Immediate(src));
  }
}


void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  DCHECK(!dst.AddressUsesRegister(smi_reg));
  cmpp(dst, smi_reg);
}


void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, dst);
    cmpl(kScratchRegister, src);
  }
}


void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  DCHECK(power >= 0);
  DCHECK(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movp(dst, src);
  }
  if (power < kSmiShift) {
    sarp(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shlp(dst, Immediate(power - kSmiShift));
  }
}


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  DCHECK((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shrp(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}


void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    movp(kScratchRegister, src1);
    orp(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movp(dst, kScratchRegister);
  } else {
    movp(dst, src1);
    orp(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}


Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movp(kScratchRegister, src);
  rolp(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}


Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  if (SmiValuesAre32Bits()) {
    leal(kScratchRegister, Operand(first, second, times_1, 0));
    testb(kScratchRegister, Immediate(0x03));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(kScratchRegister, first);
    orl(kScratchRegister, second);
    testb(kScratchRegister, Immediate(kSmiTagMask));
  }
  return zero;
}


Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                  Register second) {
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  }
  movp(kScratchRegister, first);
  orp(kScratchRegister, second);
  rolp(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));
  return zero;
}


Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckIsMinSmi(Register src) {
  DCHECK(!src.is(kScratchRegister));
  // If we overflow by subtracting one, it's the minimal smi value.
  cmpp(src, kSmiConstantRegister);
  return overflow;
}


Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // A 32-bit integer value can always be converted to a smi.
    return always;
  } else {
    DCHECK(SmiValuesAre31Bits());
    cmpl(src, Immediate(0xc0000000));
    return positive;
  }
}
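// Note: with 31-bit smis, cmpl(src, 0xc0000000) subtracts 0xc0000000,
// which modulo 2^32 is the same as adding 0x40000000; the sign flag ends
// up clear exactly for values in the representable range [-2^30, 2^30 - 1].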


Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // An unsigned 32-bit integer value is valid as long as the high bit
    // is not set.
    testl(src, src);
    return positive;
  } else {
    DCHECK(SmiValuesAre31Bits());
    testl(src, Immediate(0xc0000000));
    return zero;
  }
}


void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
  if (dst.is(src)) {
    andl(dst, Immediate(kSmiTagMask));
  } else {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  }
}


void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  } else {
    movl(dst, src);
    andl(dst, Immediate(kSmiTagMask));
  }
}


void MacroAssembler::JumpIfValidSmiValue(Register src,
                                         Label* on_valid,
                                         Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);
}


void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label* on_invalid,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
                                             Label* on_valid,
                                             Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);
}


void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label* on_invalid,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfSmi(Register src,
                               Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}


void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}


void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
}


void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             Label* on_equals,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);
}


void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Register src2,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


SmiAddConstant(Register dst,Register src,Smi * constant)1583 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1584   if (constant->value() == 0) {
1585     if (!dst.is(src)) {
1586       movp(dst, src);
1587     }
1588     return;
1589   } else if (dst.is(src)) {
1590     DCHECK(!dst.is(kScratchRegister));
1591     switch (constant->value()) {
1592       case 1:
1593         addp(dst, kSmiConstantRegister);
1594         return;
1595       case 2:
1596         leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1597         return;
1598       case 4:
1599         leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1600         return;
1601       case 8:
1602         leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1603         return;
1604       default:
1605         Register constant_reg = GetSmiConstant(constant);
1606         addp(dst, constant_reg);
1607         return;
1608     }
1609   } else {
1610     switch (constant->value()) {
1611       case 1:
1612         leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1613         return;
1614       case 2:
1615         leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1616         return;
1617       case 4:
1618         leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1619         return;
1620       case 8:
1621         leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1622         return;
1623       default:
1624         LoadSmiConstant(dst, constant);
1625         addp(dst, src);
1626         return;
1627     }
1628   }
1629 }
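
// Why the small-constant cases above can use leap: kSmiConstantRegister
// permanently holds Smi::FromInt(1), and the tagged bits of Smi::FromInt(k)
// are exactly k times those of Smi::FromInt(1). So, for example,
//   leap(dst, Operand(src, kSmiConstantRegister, times_4, 0))
// computes src + 4 * Smi(1) == src + Smi(4) in a single instruction,
// without materializing the constant in a register.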


void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    if (SmiValuesAre32Bits()) {
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
    } else {
      DCHECK(SmiValuesAre31Bits());
      addp(dst, Immediate(constant));
    }
  }
}
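
// In the 32-bit-smi branch above, the smi value occupies the upper dword of
// the memory word, so adding the untagged value to the dword at offset
// kSmiShift / kBitsPerByte (i.e. +4) adds the smi in place without touching
// the tag bits. As a sketch: if the slot holds Smi::FromInt(5), adding 3 to
// its upper dword leaves Smi::FromInt(8) in the slot.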


void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    addp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
      subp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        Label done;
        j(no_overflow, &done, Label::kNear);
        subp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bail out on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      CHECK(mode.IsEmpty());
    }
  } else {
    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
    LoadSmiConstant(dst, constant);
    addp(dst, src);
    j(overflow, bailout_label, near_jump);
  }
}


void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subp(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result, it only
      // differs on the overflow bit, which we don't check here.
      addp(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addp(dst, src);
    }
  }
}
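
// The Smi::kMinValue special case above rests on a two's-complement
// identity: negating the most negative value overflows and yields the value
// itself, so x - kMinValue and x + kMinValue produce the same bit pattern
// and differ only in the overflow flag, which this variant ignores. The
// general case cannot simply negate kMinValue, hence the separate branch.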


void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    subp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
      addp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        Label done;
        j(no_overflow, &done, Label::kNear);
        addp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bail out on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      CHECK(mode.IsEmpty());
    }
  } else {
    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
    if (constant->value() == Smi::kMinValue) {
      DCHECK(!dst.is(kScratchRegister));
      movp(dst, src);
      LoadSmiConstant(kScratchRegister, constant);
      subp(dst, kScratchRegister);
      j(overflow, bailout_label, near_jump);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addp(dst, src);
      j(overflow, bailout_label, near_jump);
    }
  }
}


void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    movp(kScratchRegister, src);
    negp(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpp(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    movp(src, kScratchRegister);
  } else {
    movp(dst, src);
    negp(dst);
    cmpp(dst, src);
    // If the result is zero or Smi::kMinValue, negation failed to create a
    // smi.
    j(not_equal, on_smi_result, near_jump);
  }
}
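
// A note on the equality check above: negation maps a tagged smi to itself
// in exactly two cases, 0 (since -0 == 0 in two's complement) and
// Smi::kMinValue (whose negation overflows back to itself). Both must take
// the slow path: kMinValue has no smi negation, and negating smi 0 has to
// produce the heap number -0.0 to match JavaScript semantics. The single
// compare against the original value catches both cases.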


template<class T>
static void SmiAddHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->addp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->subp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movp(dst, src1);
    masm->addp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      movp(kScratchRegister, src1);
      addp(kScratchRegister, src2);
      Check(no_overflow, kSmiAdditionOverflow);
    }
    leap(dst, Operand(src1, src2, times_1, 0));
  } else {
    addp(dst, src2);
    Assert(no_overflow, kSmiAdditionOverflow);
  }
}


template<class T>
static void SmiSubHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->subp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->addp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movp(dst, src1);
    masm->subp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


template<class T>
static void SmiSubNoOverflowHelper(MacroAssembler* masm,
                                   Register dst,
                                   Register src1,
                                   T src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    masm->movp(dst, src1);
  }
  masm->subp(dst, src2);
  masm->Assert(no_overflow, kSmiSubtractionOverflow);
}


void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  DCHECK(!dst.is(src2));
  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2) {
  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}


void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movp(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imulp(dst, src2);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result.  If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);

    movp(dst, kScratchRegister);
    xorp(dst, src2);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imulp(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result.  If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero, so check whether the other is
    // negative.
    movp(kScratchRegister, src1);
    xorp(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
  }
}
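
// Why the negative-zero check above is needed: in JavaScript, -5 * 0
// evaluates to -0.0, which no smi can represent. When the integer product
// is zero, xor-ing the operands puts the sign of the mathematical product
// in the sign bit: a negative xor means exactly one operand was negative,
// so the true result is -0 and the slow path must produce a heap number.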


void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  testl(rax, Immediate(~Smi::kMinValue));
  j(not_zero, &safe_div, Label::kNear);
  testp(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
  } else {
    j(negative, on_not_smi_result, near_jump);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result, near_jump);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}
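
// The mask test above folds two slow cases into one check. With 32-bit smi
// values, Smi::kMinValue is INT32_MIN, so ~Smi::kMinValue == 0x7fffffff and
// (rax & 0x7fffffff) == 0 holds only for rax == 0 or rax == Smi::kMinValue.
// In either case the sign of the divisor decides: 0 / negative would be
// -0.0, and kMinValue / negative may overflow, so both bail out when src2
// is negative.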


void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));
  DCHECK(!src1.is(src2));

  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  Label safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag inputs and go slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  jmp(on_not_smi_result, near_jump);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  // Check for a negative zero result.  If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  Label smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result, Label::kNear);
  testp(src1, src1);
  j(negative, on_not_smi_result, near_jump);
  bind(&smi_result);
  Integer32ToSmi(dst, rdx);
}


void MacroAssembler::SmiNot(Register dst, Register src) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src.is(kScratchRegister));
  if (SmiValuesAre32Bits()) {
    // Set tag and padding bits before negating, so that they are zero
    // afterwards.
    movl(kScratchRegister, Immediate(~0));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(kScratchRegister, Immediate(1));
  }
  if (dst.is(src)) {
    xorp(dst, kScratchRegister);
  } else {
    leap(dst, Operand(src, kScratchRegister, times_1, 0));
  }
  notp(dst);
}
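
// A bit-level sketch of SmiNot, for 31-bit smis where Smi(v) is stored as
// v << 1 with a zero tag bit: first set the tag bit (xor with 1, or the
// equivalent leap addition, which cannot carry because the tag bit is known
// to be clear), giving 2v + 1; then notp yields ~(2v + 1) == 2 * ~v, i.e.
// Smi(~v) with the tag bit clear again. The 32-bit-smi case works the same
// way on the low 32 tag/padding bits.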


void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  DCHECK(!dst.is(src2));
  if (!dst.is(src1)) {
    movp(dst, src1);
  }
  andp(dst, src2);
}


void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    Set(dst, 0);
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    andp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    andp(dst, src);
  }
}


void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    DCHECK(!src1.is(src2));
    movp(dst, src1);
  }
  orp(dst, src2);
}


void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    orp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    orp(dst, src);
  }
}


void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    DCHECK(!src1.is(src2));
    movp(dst, src1);
  }
  xorp(dst, src2);
}


void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    xorp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    xorp(dst, src);
  }
}


void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  DCHECK(is_uint5(shift_value));
  if (shift_value > 0) {
    if (dst.is(src)) {
      sarp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}


void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
    if (shift_value > 0) {
      // The shift amount is specified by the lower 5 bits, not six as in the
      // shl opcode.
      shlq(dst, Immediate(shift_value & 0x1f));
    }
  } else {
    DCHECK(SmiValuesAre31Bits());
    if (dst.is(src)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      SmiToInteger32(dst, src);
      shll(dst, Immediate(shift_value));
      JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}


void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // Logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    if (shift_value == 0) {
      testp(src, src);
      j(negative, on_not_smi_result, near_jump);
    }
    if (SmiValuesAre32Bits()) {
      movp(dst, src);
      shrp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      DCHECK(SmiValuesAre31Bits());
      SmiToInteger32(dst, src);
      shrp(dst, Immediate(shift_value));
      JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}
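
// Why a zero shift still needs the negative test above: a logical right
// shift reinterprets its operand as unsigned, so a negative smi shifted by
// zero becomes an unsigned number >= 2^31 that no smi can represent, and
// the operation must bail out even though no bits actually move.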


void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smi_result,
                                  Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    DCHECK(!dst.is(rcx));
    if (!dst.is(src1)) {
      movp(dst, src1);
    }
    // Untag shift amount.
    SmiToInteger32(rcx, src2);
    // The shift amount is specified by the lower 5 bits, not six as in the
    // shl opcode.
    andp(rcx, Immediate(0x1f));
    shlq_cl(dst);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(!dst.is(kScratchRegister));
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    DCHECK(!dst.is(src2));
    DCHECK(!dst.is(rcx));

    if (src1.is(rcx) || src2.is(rcx)) {
      movq(kScratchRegister, rcx);
    }
    if (dst.is(src1)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      Label valid_result;
      SmiToInteger32(dst, src1);
      SmiToInteger32(rcx, src2);
      shll_cl(dst);
      JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
      // Since neither src1 nor src2 can be dst, we do not need to restore
      // them after clobbering dst.
      if (src1.is(rcx) || src2.is(rcx)) {
        if (src1.is(rcx)) {
          movq(src1, kScratchRegister);
        } else {
          movq(src2, kScratchRegister);
        }
      }
      jmp(on_not_smi_result, near_jump);
      bind(&valid_result);
      Integer32ToSmi(dst, dst);
    }
  }
}


void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(rcx));
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  }
  if (dst.is(src1)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    Label valid_result;
    SmiToInteger32(dst, src1);
    SmiToInteger32(rcx, src2);
    shrl_cl(dst);
    JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
    // Since neither src1 nor src2 can be dst, we do not need to restore
    // them after clobbering dst.
    if (src1.is(rcx) || src2.is(rcx)) {
      if (src1.is(rcx)) {
        movq(src1, kScratchRegister);
      } else {
        movq(src2, kScratchRegister);
      }
    }
    jmp(on_not_smi_result, near_jump);
    bind(&valid_result);
    Integer32ToSmi(dst, dst);
  }
}


void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(rcx));

  SmiToInteger32(rcx, src2);
  if (!dst.is(src1)) {
    movp(dst, src1);
  }
  SmiToInteger32(dst, dst);
  sarl_cl(dst);
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(src1));
  DCHECK(!dst.is(src2));
  // The operands must not both be smis.
#ifdef DEBUG
  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
  Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(0, Smi::FromInt(0));
  movl(kScratchRegister, Immediate(kSmiTagMask));
  andp(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // If non-zero, neither operand is a smi.
  j(not_zero, on_not_smis, near_jump);

  // Exactly one operand is a smi.
  DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero
  // or one.
  subp(kScratchRegister, Immediate(1));
  // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
  movp(dst, src1);
  xorp(dst, src2);
  andp(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xorp(dst, src1);
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
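
// How the branchless select above works, with m = kScratchRegister after
// the subtraction (all 1s when src1 is a smi, all 0s otherwise):
//   dst = ((src1 ^ src2) & m) ^ src1
// If m is all 1s this simplifies to src1 ^ src2 ^ src1 == src2; if m is all
// 0s it is just src1. Either way dst ends up holding whichever operand is
// not a smi, without taking a branch.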


SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  if (SmiValuesAre32Bits()) {
    DCHECK(is_uint6(shift));
    // There is a possible optimization if shift is in the range 60-63, but that
    // will (and must) never happen.
    if (!dst.is(src)) {
      movp(dst, src);
    }
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    // We have to sign extend the index register to 64-bit as the SMI might
    // be negative.
    movsxlq(dst, dst);
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}
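
// A sketch of the 32-bit-smi arithmetic above: the value v is stored in the
// upper dword, i.e. as v << 32. To index an array of 2^shift-byte elements
// we need v << shift, so shifting right by (kSmiShift - shift) does the
// untagging and the scaling in one instruction. For example, with shift ==
// kPointerSizeLog2 == 3, sarp by 29 turns Smi(v) directly into v * 8.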


SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  if (SmiValuesAre32Bits()) {
    // Register src holds a positive smi.
    DCHECK(is_uint6(shift));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negp(dst);
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negq(dst);
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}


void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    DCHECK_EQ(0, kSmiShift % kBitsPerByte);
    addl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, src);
    addl(dst, kScratchRegister);
  }
}


void MacroAssembler::Push(Smi* source) {
  intptr_t smi = reinterpret_cast<intptr_t>(source);
  if (is_int32(smi)) {
    Push(Immediate(static_cast<int32_t>(smi)));
  } else {
    Register constant = GetSmiConstant(source);
    Push(constant);
  }
}


void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
  DCHECK(!src.is(scratch));
  movp(scratch, src);
  // High bits.
  shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  shlp(src, Immediate(kSmiShift));
  Push(src);
  // Low bits.
  shlp(scratch, Immediate(kSmiShift));
  Push(scratch);
}


void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
  DCHECK(!dst.is(scratch));
  Pop(scratch);
  // Low bits.
  shrp(scratch, Immediate(kSmiShift));
  Pop(dst);
  shrp(dst, Immediate(kSmiShift));
  // High bits.
  shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  orp(dst, scratch);
}
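
// A round-trip sketch of the two helpers above, assuming 64-bit pointers
// and kSmiShift == 32: for src == 0x1122334455667788, the pushed words are
// 0x1122334400000000 (high half, smi-tagged) and 0x5566778800000000 (low
// half, smi-tagged). Popping reverses the shifts and ORs the halves back
// together. Note that PushRegisterAsTwoSmis clobbers src, and both stack
// slots stay valid smis, so stack scanning never sees a raw pointer value.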


void MacroAssembler::Test(const Operand& src, Smi* source) {
  if (SmiValuesAre32Bits()) {
    testl(Operand(src, kIntSize), Immediate(source->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    testl(src, Immediate(source));
  }
}


// ----------------------------------------------------------------------------


void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_found) {
  // Register use: result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  SmiToInteger32(
      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  shrl(mask, Immediate(1));
  subp(mask, Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object,
           isolate()->factory()->heap_number_map(),
           not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  andp(scratch, mask);
  // Each entry in the string cache consists of two pointer-sized fields,
  // but the times_twice_pointer_size (multiply by 16) scale factor is not
  // supported by the addressing mode on x64, so the entry index has to be
  // premultiplied before the lookup.
  shlp(scratch, Immediate(kPointerSizeLog2 + 1));

  Register index = scratch;
  Register probe = mask;
  movp(probe,
       FieldOperand(number_string_cache,
                    index,
                    times_1,
                    FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
  ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
  j(parity_even, not_found);  // Bail out if NaN is involved.
  j(not_equal, not_found);  // The cache did not contain this value.
  jmp(&load_result_from_cache);

  bind(&is_smi);
  SmiToInteger32(scratch, object);
  andp(scratch, mask);
  // Each entry in the string cache consists of two pointer-sized fields,
  // but the times_twice_pointer_size (multiply by 16) scale factor is not
  // supported by the addressing mode on x64, so the entry index has to be
  // premultiplied before the lookup.
  shlp(scratch, Immediate(kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  cmpp(object,
       FieldOperand(number_string_cache,
                    index,
                    times_1,
                    FixedArray::kHeaderSize));
  j(not_equal, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  movp(result,
       FieldOperand(number_string_cache,
                    index,
                    times_1,
                    FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
}
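
// A worked example of the cache indexing above, assuming a hypothetical
// cache of 64 entries: the backing FixedArray has length 128, so the mask
// becomes 128 / 2 - 1 == 63. A smi key hashes to value & 63; the index is
// then shifted left by kPointerSizeLog2 + 1 (times 16) because each entry
// occupies two 8-byte slots: the number at entry * 16 and its cached string
// at entry * 16 + kPointerSize.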


void MacroAssembler::JumpIfNotString(Register object,
                                     Register object_map,
                                     Label* not_string,
                                     Label::Distance near_jump) {
  Condition is_smi = CheckSmi(object);
  j(is_smi, not_string, near_jump);
  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
  j(above_equal, not_string, near_jump);
}


void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
    Register first_object, Register second_object, Register scratch1,
    Register scratch2, Label* on_fail, Label::Distance near_jump) {
  // Check that both objects are not smis.
  Condition either_smi = CheckEitherSmi(first_object, second_object);
  j(either_smi, on_fail, near_jump);

  // Load instance type for both strings.
  movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
  movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat one-byte strings.
  DCHECK(kNotStringTag != 0);
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;

  andl(scratch1, Immediate(kFlatOneByteStringMask));
  andl(scratch2, Immediate(kFlatOneByteStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}
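
// The interleaving trick above: the DCHECK_EQ guarantees that the masked
// instance-type bits and the same bits shifted left by 3 never overlap, so
//   leap(scratch1, Operand(scratch1, scratch2, times_8, 0))
// packs both masked types into one word with no carries (scratch1 +
// scratch2 * 8 acts as a bitwise concatenation). A single cmpl against the
// identically packed expected tags then tests both strings at once.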


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
    Register instance_type, Register scratch, Label* failure,
    Label::Distance near_jump) {
  if (!scratch.is(instance_type)) {
    movl(scratch, instance_type);
  }

  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;

  andl(scratch, Immediate(kFlatOneByteStringMask));
  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
  j(not_equal, failure, near_jump);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first_object_instance_type, Register second_object_instance_type,
    Register scratch1, Register scratch2, Label* on_fail,
    Label::Distance near_jump) {
  // Load instance type for both strings.
  movp(scratch1, first_object_instance_type);
  movp(scratch2, second_object_instance_type);

  // Check that both are flat one-byte strings.
  DCHECK(kNotStringTag != 0);
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;

  andl(scratch1, Immediate(kFlatOneByteStringMask));
  andl(scratch2, Immediate(kFlatOneByteStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}


template<class T>
static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
                                      T operand_or_register,
                                      Label* not_unique_name,
                                      Label::Distance distance) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  masm->testb(operand_or_register,
              Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  masm->j(zero, &succeed, Label::kNear);
  masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
  masm->j(not_equal, not_unique_name, distance);

  masm->bind(&succeed);
}


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
                                                     Label* not_unique_name,
                                                     Label::Distance distance) {
  JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
}


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
                                                     Label* not_unique_name,
                                                     Label::Distance distance) {
  JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
}


void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    movp(dst, src);
  }
}


void MacroAssembler::Move(Register dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(dst, source);
  }
}


void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    cmpp(dst, kScratchRegister);
  }
}


void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    cmpp(dst, kScratchRegister);
  }
}


void MacroAssembler::Push(Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Push(Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    Push(kScratchRegister);
  }
}


void MacroAssembler::MoveHeapObject(Register result,
                                    Handle<Object> object) {
  AllowDeferredHandleDereference using_raw_address;
  DCHECK(object->IsHeapObject());
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Move(result, cell, RelocInfo::CELL);
    movp(result, Operand(result, 0));
  } else {
    Move(result, object, RelocInfo::EMBEDDED_OBJECT);
  }
}


void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
  if (dst.is(rax)) {
    AllowDeferredHandleDereference embedding_raw_address;
    load_rax(cell.location(), RelocInfo::CELL);
  } else {
    Move(dst, cell, RelocInfo::CELL);
    movp(dst, Operand(dst, 0));
  }
}


void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    addp(rsp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::DropUnderReturnAddress(int stack_elements,
                                            Register scratch) {
  DCHECK(stack_elements > 0);
  if (kPointerSize == kInt64Size && stack_elements == 1) {
    popq(MemOperand(rsp, 0));
    return;
  }

  PopReturnAddressTo(scratch);
  Drop(stack_elements);
  PushReturnAddressFrom(scratch);
}


void MacroAssembler::Push(Register src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    // x32 uses 64-bit push for rbp in the prologue.
    DCHECK(src.code() != rbp.code());
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), src);
  }
}
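
// A note on the x32 paths in the Push/Pop helpers here: pointers are only
// 4 bytes wide, but the hardware push/pop instructions always move 8 bytes.
// The helpers therefore emulate 4-byte stack slots by adjusting rsp with
// leal (which, unlike addl/subl, leaves the flags untouched) and then
// moving the value with movp.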


void MacroAssembler::Push(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), kScratchRegister);
  }
}


void MacroAssembler::PushQuad(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    pushq(kScratchRegister);
  }
}


void MacroAssembler::Push(Immediate value) {
  if (kPointerSize == kInt64Size) {
    pushq(value);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), value);
  }
}


void MacroAssembler::PushImm32(int32_t imm32) {
  if (kPointerSize == kInt64Size) {
    pushq_imm32(imm32);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), Immediate(imm32));
  }
}


void MacroAssembler::Pop(Register dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    // x32 uses 64-bit pop for rbp in the epilogue.
    DCHECK(dst.code() != rbp.code());
    movp(dst, Operand(rsp, 0));
    leal(rsp, Operand(rsp, 4));
  }
}


void MacroAssembler::Pop(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    Register scratch = dst.AddressUsesRegister(kScratchRegister)
        ? kSmiConstantRegister : kScratchRegister;
    movp(scratch, Operand(rsp, 0));
    movp(dst, scratch);
    leal(rsp, Operand(rsp, 4));
    if (scratch.is(kSmiConstantRegister)) {
      // Restore kSmiConstantRegister.
      movp(kSmiConstantRegister,
           reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
           Assembler::RelocInfoNone());
    }
  }
}


void MacroAssembler::PopQuad(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    popq(kScratchRegister);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
                                                        Register base,
                                                        int offset) {
  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt64Size) {
    movsxlq(dst, FieldOperand(base, offset));
  } else {
    movp(dst, FieldOperand(base, offset));
    SmiToInteger32(dst, dst);
  }
}


void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
                                                           int offset,
                                                           int bits) {
  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt32Size) {
    // On x32, this field is represented as a smi.
    bits += kSmiShift;
  }
  int byte_offset = bits / kBitsPerByte;
  int bit_in_byte = bits & (kBitsPerByte - 1);
  testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
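
// A quick example of the bit arithmetic above, using a hypothetical
// bits == 11: byte_offset == 11 / 8 == 1 and bit_in_byte == 11 & 7 == 3, so
// the emitted testb probes bit 3 of the byte at offset + 1. Doing the
// byte/bit split at assembly time keeps the runtime check down to a single
// one-byte memory test.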
2948 
2949 
Jump(ExternalReference ext)2950 void MacroAssembler::Jump(ExternalReference ext) {
2951   LoadAddress(kScratchRegister, ext);
2952   jmp(kScratchRegister);
2953 }
2954 
2955 
Jump(const Operand & op)2956 void MacroAssembler::Jump(const Operand& op) {
2957   if (kPointerSize == kInt64Size) {
2958     jmp(op);
2959   } else {
2960     movp(kScratchRegister, op);
2961     jmp(kScratchRegister);
2962   }
2963 }
2964 
2965 
Jump(Address destination,RelocInfo::Mode rmode)2966 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2967   Move(kScratchRegister, destination, rmode);
2968   jmp(kScratchRegister);
2969 }
2970 
2971 
Jump(Handle<Code> code_object,RelocInfo::Mode rmode)2972 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2973   // TODO(X64): Inline this
2974   jmp(code_object, rmode);
2975 }
2976 
2977 
CallSize(ExternalReference ext)2978 int MacroAssembler::CallSize(ExternalReference ext) {
2979   // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
2980   return LoadAddressSize(ext) +
2981          Assembler::kCallScratchRegisterInstructionLength;
2982 }
2983 
2984 
2985 void MacroAssembler::Call(ExternalReference ext) {
2986 #ifdef DEBUG
2987   int end_position = pc_offset() + CallSize(ext);
2988 #endif
2989   LoadAddress(kScratchRegister, ext);
2990   call(kScratchRegister);
2991 #ifdef DEBUG
2992   CHECK_EQ(end_position, pc_offset());
2993 #endif
2994 }
2995 
2996 
2997 void MacroAssembler::Call(const Operand& op) {
2998   if (kPointerSize == kInt64Size) {
2999     call(op);
3000   } else {
3001     movp(kScratchRegister, op);
3002     call(kScratchRegister);
3003   }
3004 }
3005 
3006 
3007 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
3008 #ifdef DEBUG
3009   int end_position = pc_offset() + CallSize(destination);
3010 #endif
3011   Move(kScratchRegister, destination, rmode);
3012   call(kScratchRegister);
3013 #ifdef DEBUG
3014   CHECK_EQ(pc_offset(), end_position);
3015 #endif
3016 }
3017 
3018 
3019 void MacroAssembler::Call(Handle<Code> code_object,
3020                           RelocInfo::Mode rmode,
3021                           TypeFeedbackId ast_id) {
3022 #ifdef DEBUG
3023   int end_position = pc_offset() + CallSize(code_object);
3024 #endif
3025   DCHECK(RelocInfo::IsCodeTarget(rmode) ||
3026       rmode == RelocInfo::CODE_AGE_SEQUENCE);
3027   call(code_object, rmode, ast_id);
3028 #ifdef DEBUG
3029   CHECK_EQ(end_position, pc_offset());
3030 #endif
3031 }
3032 
3033 
3034 void MacroAssembler::Pushad() {
3035   Push(rax);
3036   Push(rcx);
3037   Push(rdx);
3038   Push(rbx);
3039   // Not pushing rsp or rbp.
3040   Push(rsi);
3041   Push(rdi);
3042   Push(r8);
3043   Push(r9);
3044   // r10 is kScratchRegister.
3045   Push(r11);
3046   // r12 is kSmiConstantRegister.
3047   // r13 is kRootRegister.
3048   Push(r14);
3049   Push(r15);
3050   STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
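  // rsp, rbp, kScratchRegister (r10), kSmiConstantRegister (r12) and
  // kRootRegister (r13) are not pushed, so reserve their five slots to
  // keep the area at a fixed kNumSafepointRegisters slots.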
3051   // Use lea for symmetry with Popad.
3052   int sp_delta =
3053       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3054   leap(rsp, Operand(rsp, -sp_delta));
3055 }
3056 
3057 
3058 void MacroAssembler::Popad() {
3059   // Popad must not change the flags, so use lea instead of addq.
3060   int sp_delta =
3061       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3062   leap(rsp, Operand(rsp, sp_delta));
3063   Pop(r15);
3064   Pop(r14);
3065   Pop(r11);
3066   Pop(r9);
3067   Pop(r8);
3068   Pop(rdi);
3069   Pop(rsi);
3070   Pop(rbx);
3071   Pop(rdx);
3072   Pop(rcx);
3073   Pop(rax);
3074 }
3075 
3076 
3077 void MacroAssembler::Dropad() {
3078   addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
3079 }
3080 
3081 
3082 // Order in which general registers are pushed by Pushad:
3083 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
3084 const int
3085 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
3086     0,   // rax
3087     1,   // rcx
3088     2,   // rdx
3089     3,   // rbx
3090     -1,  // rsp -- not pushed
3091     -1,  // rbp -- not pushed
3092     4,   // rsi
3093     5,   // rdi
3094     6,   // r8
3095     7,   // r9
3096     -1,  // r10 (kScratchRegister) -- not pushed
3097     8,   // r11
3098     -1,  // r12 (kSmiConstantRegister) -- not pushed
3099     -1,  // r13 (kRootRegister) -- not pushed
3100     9,   // r14
3101     10   // r15
3102 };
3103 
3104 
3105 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
3106                                                   const Immediate& imm) {
3107   movp(SafepointRegisterSlot(dst), imm);
3108 }
3109 
3110 
3111 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
3112   movp(SafepointRegisterSlot(dst), src);
3113 }
3114 
3115 
3116 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
3117   movp(dst, SafepointRegisterSlot(src));
3118 }
3119 
3120 
3121 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3122   return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3123 }
3124 
3125 
3126 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3127                                     int handler_index) {
3128   // Adjust this code if not the case.
3129   STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3130                                                 kFPOnStackSize);
3131   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3132   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3133   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3134   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3135   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
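  // Together the asserts above pin down the handler layout built below,
  // growing up from the stack pointer:
  //   rsp[0 * kPointerSize] : next handler
  //   rsp[1 * kPointerSize] : code object
  //   rsp[2 * kPointerSize] : state
  //   rsp[3 * kPointerSize] : context
  //   rsp[4 * kPointerSize] : frame pointer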
3136 
3137   // We will build up the handler from the bottom by pushing on the stack.
3138   // First push the frame pointer and context.
3139   if (kind == StackHandler::JS_ENTRY) {
3140     // The frame pointer does not point to a JS frame so we save NULL for
3141     // rbp. We expect the code throwing an exception to check rbp before
3142     // dereferencing it to restore the context.
3143     pushq(Immediate(0));  // NULL frame pointer.
3144     Push(Smi::FromInt(0));  // No context.
3145   } else {
3146     pushq(rbp);
3147     Push(rsi);
3148   }
3149 
3150   // Push the state and the code object.
3151   unsigned state =
3152       StackHandler::IndexField::encode(handler_index) |
3153       StackHandler::KindField::encode(kind);
3154   Push(Immediate(state));
3155   Push(CodeObject());
3156 
3157   // Link the current handler as the next handler.
3158   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3159   Push(ExternalOperand(handler_address));
3160   // Set this new handler as the current one.
3161   movp(ExternalOperand(handler_address), rsp);
3162 }
3163 
3164 
3165 void MacroAssembler::PopTryHandler() {
3166   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3167   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3168   Pop(ExternalOperand(handler_address));
3169   addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3170 }
3171 
3172 
3173 void MacroAssembler::JumpToHandlerEntry() {
3174   // Compute the handler entry address and jump to it.  The handler table is
3175   // a fixed array of (smi-tagged) code offsets.
3176   // rax = exception, rdi = code object, rdx = state.
3177   movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
3178   shrp(rdx, Immediate(StackHandler::kKindWidth));
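  // The state word keeps the kind in its low kKindWidth bits and the
  // handler index above them (see PushTryHandler), so the shift leaves
  // just the index.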
3179   movp(rdx,
3180        FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
3181   SmiToInteger64(rdx, rdx);
3182   leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
3183   jmp(rdi);
3184 }
3185 
3186 
3187 void MacroAssembler::Throw(Register value) {
3188   // Adjust this code if not the case.
3189   STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3190                                                 kFPOnStackSize);
3191   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3192   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3193   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3194   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3195   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3196 
3197   // The exception is expected in rax.
3198   if (!value.is(rax)) {
3199     movp(rax, value);
3200   }
3201   // Drop the stack pointer to the top of the top handler.
3202   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3203   movp(rsp, ExternalOperand(handler_address));
3204   // Restore the next handler.
3205   Pop(ExternalOperand(handler_address));
3206 
3207   // Remove the code object and state, compute the handler address in rdi.
3208   Pop(rdi);  // Code object.
3209   Pop(rdx);  // Offset and state.
3210 
3211   // Restore the context and frame pointer.
3212   Pop(rsi);  // Context.
3213   popq(rbp);  // Frame pointer.
3214 
3215   // If the handler is a JS frame, restore the context to the frame.
3216   // (kind == JS_ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
3217   // rbp or rsi.
3218   Label skip;
3219   testp(rsi, rsi);
3220   j(zero, &skip, Label::kNear);
3221   movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
3222   bind(&skip);
3223 
3224   JumpToHandlerEntry();
3225 }
3226 
3227 
3228 void MacroAssembler::ThrowUncatchable(Register value) {
3229   // Adjust this code if not the case.
3230   STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3231                                                 kFPOnStackSize);
3232   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3233   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3234   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3235   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3236   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3237 
3238   // The exception is expected in rax.
3239   if (!value.is(rax)) {
3240     movp(rax, value);
3241   }
3242   // Drop the stack pointer to the top of the top stack handler.
3243   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3244   Load(rsp, handler_address);
3245 
3246   // Unwind the handlers until the top ENTRY handler is found.
3247   Label fetch_next, check_kind;
3248   jmp(&check_kind, Label::kNear);
3249   bind(&fetch_next);
3250   movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
3251 
3252   bind(&check_kind);
3253   STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3254   testl(Operand(rsp, StackHandlerConstants::kStateOffset),
3255         Immediate(StackHandler::KindField::kMask));
3256   j(not_zero, &fetch_next);
3257 
3258   // Set the top handler address to next handler past the top ENTRY handler.
3259   Pop(ExternalOperand(handler_address));
3260 
3261   // Remove the code object and state, compute the handler address in rdi.
3262   Pop(rdi);  // Code object.
3263   Pop(rdx);  // Offset and state.
3264 
3265   // Clear the context pointer and frame pointer (0 was saved in the handler).
3266   Pop(rsi);
3267   popq(rbp);
3268 
3269   JumpToHandlerEntry();
3270 }
3271 
3272 
3273 void MacroAssembler::Ret() {
3274   ret(0);
3275 }
3276 
3277 
3278 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
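  // The ret instruction can only encode a 16-bit immediate stack
  // adjustment, hence the manual fallback below for larger drops.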
3279   if (is_uint16(bytes_dropped)) {
3280     ret(bytes_dropped);
3281   } else {
3282     PopReturnAddressTo(scratch);
3283     addp(rsp, Immediate(bytes_dropped));
3284     PushReturnAddressFrom(scratch);
3285     ret(0);
3286   }
3287 }
3288 
3289 
3290 void MacroAssembler::FCmp() {
3291   fucomip();
3292   fstp(0);
3293 }
3294 
3295 
3296 void MacroAssembler::CmpObjectType(Register heap_object,
3297                                    InstanceType type,
3298                                    Register map) {
3299   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3300   CmpInstanceType(map, type);
3301 }
3302 
3303 
3304 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3305   cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3306        Immediate(static_cast<int8_t>(type)));
3307 }
3308 
3309 
3310 void MacroAssembler::CheckFastElements(Register map,
3311                                        Label* fail,
3312                                        Label::Distance distance) {
3313   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3314   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3315   STATIC_ASSERT(FAST_ELEMENTS == 2);
3316   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
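  // The four fast ElementsKinds are consecutive, so a single unsigned
  // compare against the largest fast-holey value covers all of them.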
3317   cmpb(FieldOperand(map, Map::kBitField2Offset),
3318        Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3319   j(above, fail, distance);
3320 }
3321 
3322 
3323 void MacroAssembler::CheckFastObjectElements(Register map,
3324                                              Label* fail,
3325                                              Label::Distance distance) {
3326   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3327   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3328   STATIC_ASSERT(FAST_ELEMENTS == 2);
3329   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3330   cmpb(FieldOperand(map, Map::kBitField2Offset),
3331        Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3332   j(below_equal, fail, distance);
3333   cmpb(FieldOperand(map, Map::kBitField2Offset),
3334        Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3335   j(above, fail, distance);
3336 }
3337 
3338 
3339 void MacroAssembler::CheckFastSmiElements(Register map,
3340                                           Label* fail,
3341                                           Label::Distance distance) {
3342   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3343   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3344   cmpb(FieldOperand(map, Map::kBitField2Offset),
3345        Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3346   j(above, fail, distance);
3347 }
3348 
3349 
3350 void MacroAssembler::StoreNumberToDoubleElements(
3351     Register maybe_number,
3352     Register elements,
3353     Register index,
3354     XMMRegister xmm_scratch,
3355     Label* fail,
3356     int elements_offset) {
3357   Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
3358 
3359   JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3360 
3361   CheckMap(maybe_number,
3362            isolate()->factory()->heap_number_map(),
3363            fail,
3364            DONT_DO_SMI_CHECK);
3365 
3366   // Double value, canonicalize NaN.
3367   uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
3368   cmpl(FieldOperand(maybe_number, offset),
3369        Immediate(kNaNOrInfinityLowerBoundUpper32));
3370   j(greater_equal, &maybe_nan, Label::kNear);
3371 
3372   bind(&not_nan);
3373   movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3374   bind(&have_double_value);
3375   movsd(FieldOperand(elements, index, times_8,
3376                      FixedDoubleArray::kHeaderSize - elements_offset),
3377         xmm_scratch);
3378   jmp(&done);
3379 
3380   bind(&maybe_nan);
3381   // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3382   // it's an Infinity, and the non-NaN code path applies.
3383   j(greater, &is_nan, Label::kNear);
3384   cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
3385   j(zero, &not_nan);
3386   bind(&is_nan);
3387   // Convert all NaNs to the same canonical NaN value when they are stored in
3388   // the double array.
3389   Set(kScratchRegister,
3390       bit_cast<uint64_t>(
3391           FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3392   movq(xmm_scratch, kScratchRegister);
3393   jmp(&have_double_value, Label::kNear);
3394 
3395   bind(&smi_value);
3396   // Value is a smi. Convert it to a double and store.
3397   // Preserve original value.
3398   SmiToInteger32(kScratchRegister, maybe_number);
3399   Cvtlsi2sd(xmm_scratch, kScratchRegister);
3400   movsd(FieldOperand(elements, index, times_8,
3401                      FixedDoubleArray::kHeaderSize - elements_offset),
3402         xmm_scratch);
3403   bind(&done);
3404 }
3405 
3406 
3407 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3408   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3409 }
3410 
3411 
3412 void MacroAssembler::CheckMap(Register obj,
3413                               Handle<Map> map,
3414                               Label* fail,
3415                               SmiCheckType smi_check_type) {
3416   if (smi_check_type == DO_SMI_CHECK) {
3417     JumpIfSmi(obj, fail);
3418   }
3419 
3420   CompareMap(obj, map);
3421   j(not_equal, fail);
3422 }
3423 
3424 
3425 void MacroAssembler::ClampUint8(Register reg) {
3426   Label done;
3427   testl(reg, Immediate(0xFFFFFF00));
3428   j(zero, &done, Label::kNear);
3429   setcc(negative, reg);  // 1 if negative, 0 if positive.
3430   decb(reg);  // 0 if negative, 255 if positive.
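  // For example, an input of 300 clamps to 255 and an input of -5 to 0.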
3431   bind(&done);
3432 }
3433 
3434 
3435 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3436                                         XMMRegister temp_xmm_reg,
3437                                         Register result_reg) {
3438   Label done;
3439   Label conv_failure;
3440   xorps(temp_xmm_reg, temp_xmm_reg);
3441   cvtsd2si(result_reg, input_reg);
3442   testl(result_reg, Immediate(0xFFFFFF00));
3443   j(zero, &done, Label::kNear);
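  // cvtsd2si returns 0x80000000 (INT32_MIN) if the conversion fails;
  // subtracting 1 overflows for exactly that value, so the overflow
  // flag below detects the failure.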
3444   cmpl(result_reg, Immediate(1));
3445   j(overflow, &conv_failure, Label::kNear);
3446   movl(result_reg, Immediate(0));
3447   setcc(sign, result_reg);
3448   subl(result_reg, Immediate(1));
3449   andl(result_reg, Immediate(255));
3450   jmp(&done, Label::kNear);
3451   bind(&conv_failure);
3452   Set(result_reg, 0);
3453   ucomisd(input_reg, temp_xmm_reg);
3454   j(below, &done, Label::kNear);
3455   Set(result_reg, 255);
3456   bind(&done);
3457 }
3458 
3459 
3460 void MacroAssembler::LoadUint32(XMMRegister dst,
3461                                 Register src) {
3462   if (FLAG_debug_code) {
3463     cmpq(src, Immediate(0xffffffff));
3464     Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3465   }
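  // The upper 32 bits are zero, so the value is non-negative as a 64-bit
  // integer and a signed 64-bit conversion yields the correct result.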
3466   cvtqsi2sd(dst, src);
3467 }
3468 
3469 
3470 void MacroAssembler::SlowTruncateToI(Register result_reg,
3471                                      Register input_reg,
3472                                      int offset) {
3473   DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3474   call(stub.GetCode(), RelocInfo::CODE_TARGET);
3475 }
3476 
3477 
3478 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3479                                            Register input_reg) {
3480   Label done;
3481   movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3482   cvttsd2siq(result_reg, xmm0);
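  // cvttsd2siq returns 0x8000000000000000 (INT64_MIN) if the conversion
  // fails, and comparing with 1 overflows only for that value, so
  // no_overflow means the fast path succeeded.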
3483   cmpq(result_reg, Immediate(1));
3484   j(no_overflow, &done, Label::kNear);
3485 
3486   // Slow case.
3487   if (input_reg.is(result_reg)) {
3488     subp(rsp, Immediate(kDoubleSize));
3489     movsd(MemOperand(rsp, 0), xmm0);
3490     SlowTruncateToI(result_reg, rsp, 0);
3491     addp(rsp, Immediate(kDoubleSize));
3492   } else {
3493     SlowTruncateToI(result_reg, input_reg);
3494   }
3495 
3496   bind(&done);
3497   // Keep our invariant that the upper 32 bits are zero.
3498   movl(result_reg, result_reg);
3499 }
3500 
3501 
3502 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3503                                        XMMRegister input_reg) {
3504   Label done;
3505   cvttsd2siq(result_reg, input_reg);
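  // As in TruncateHeapNumberToI above, INT64_MIN from cvttsd2siq
  // signals a failed conversion.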
3506   cmpq(result_reg, Immediate(1));
3507   j(no_overflow, &done, Label::kNear);
3508 
3509   subp(rsp, Immediate(kDoubleSize));
3510   movsd(MemOperand(rsp, 0), input_reg);
3511   SlowTruncateToI(result_reg, rsp, 0);
3512   addp(rsp, Immediate(kDoubleSize));
3513 
3514   bind(&done);
3515   // Keep our invariant that the upper 32 bits are zero.
3516   movl(result_reg, result_reg);
3517 }
3518 
3519 
3520 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
3521                                XMMRegister scratch,
3522                                MinusZeroMode minus_zero_mode,
3523                                Label* lost_precision, Label* is_nan,
3524                                Label* minus_zero, Label::Distance dst) {
3525   cvttsd2si(result_reg, input_reg);
3526   Cvtlsi2sd(xmm0, result_reg);
3527   ucomisd(xmm0, input_reg);
3528   j(not_equal, lost_precision, dst);
3529   j(parity_even, is_nan, dst);  // NaN.
3530   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3531     Label done;
3532     // The integer converted back is equal to the original. We
3533     // only have to test if we got -0 as an input.
3534     testl(result_reg, result_reg);
3535     j(not_zero, &done, Label::kNear);
3536     movmskpd(result_reg, input_reg);
3537     // Bit 0 contains the sign of the double in input_reg.
3538     // If input was positive, we are ok and return 0, otherwise
3539     // jump to minus_zero.
3540     andl(result_reg, Immediate(1));
3541     j(not_zero, minus_zero, dst);
3542     bind(&done);
3543   }
3544 }
3545 
3546 
3547 void MacroAssembler::LoadInstanceDescriptors(Register map,
3548                                              Register descriptors) {
3549   movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3550 }
3551 
3552 
3553 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3554   movl(dst, FieldOperand(map, Map::kBitField3Offset));
3555   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3556 }
3557 
3558 
3559 void MacroAssembler::EnumLength(Register dst, Register map) {
3560   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3561   movl(dst, FieldOperand(map, Map::kBitField3Offset));
3562   andl(dst, Immediate(Map::EnumLengthBits::kMask));
3563   Integer32ToSmi(dst, dst);
3564 }
3565 
3566 
3567 void MacroAssembler::DispatchMap(Register obj,
3568                                  Register unused,
3569                                  Handle<Map> map,
3570                                  Handle<Code> success,
3571                                  SmiCheckType smi_check_type) {
3572   Label fail;
3573   if (smi_check_type == DO_SMI_CHECK) {
3574     JumpIfSmi(obj, &fail);
3575   }
3576   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3577   j(equal, success, RelocInfo::CODE_TARGET);
3578 
3579   bind(&fail);
3580 }
3581 
3582 
3583 void MacroAssembler::AssertNumber(Register object) {
3584   if (emit_debug_code()) {
3585     Label ok;
3586     Condition is_smi = CheckSmi(object);
3587     j(is_smi, &ok, Label::kNear);
3588     Cmp(FieldOperand(object, HeapObject::kMapOffset),
3589         isolate()->factory()->heap_number_map());
3590     Check(equal, kOperandIsNotANumber);
3591     bind(&ok);
3592   }
3593 }
3594 
3595 
3596 void MacroAssembler::AssertNotSmi(Register object) {
3597   if (emit_debug_code()) {
3598     Condition is_smi = CheckSmi(object);
3599     Check(NegateCondition(is_smi), kOperandIsASmi);
3600   }
3601 }
3602 
3603 
3604 void MacroAssembler::AssertSmi(Register object) {
3605   if (emit_debug_code()) {
3606     Condition is_smi = CheckSmi(object);
3607     Check(is_smi, kOperandIsNotASmi);
3608   }
3609 }
3610 
3611 
3612 void MacroAssembler::AssertSmi(const Operand& object) {
3613   if (emit_debug_code()) {
3614     Condition is_smi = CheckSmi(object);
3615     Check(is_smi, kOperandIsNotASmi);
3616   }
3617 }
3618 
3619 
3620 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3621   if (emit_debug_code()) {
3622     DCHECK(!int32_register.is(kScratchRegister));
3623     movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3624     cmpq(kScratchRegister, int32_register);
3625     Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3626   }
3627 }
3628 
3629 
3630 void MacroAssembler::AssertString(Register object) {
3631   if (emit_debug_code()) {
3632     testb(object, Immediate(kSmiTagMask));
3633     Check(not_equal, kOperandIsASmiAndNotAString);
3634     Push(object);
3635     movp(object, FieldOperand(object, HeapObject::kMapOffset));
3636     CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3637     Pop(object);
3638     Check(below, kOperandIsNotAString);
3639   }
3640 }
3641 
3642 
3643 void MacroAssembler::AssertName(Register object) {
3644   if (emit_debug_code()) {
3645     testb(object, Immediate(kSmiTagMask));
3646     Check(not_equal, kOperandIsASmiAndNotAName);
3647     Push(object);
3648     movp(object, FieldOperand(object, HeapObject::kMapOffset));
3649     CmpInstanceType(object, LAST_NAME_TYPE);
3650     Pop(object);
3651     Check(below_equal, kOperandIsNotAName);
3652   }
3653 }
3654 
3655 
3656 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3657   if (emit_debug_code()) {
3658     Label done_checking;
3659     AssertNotSmi(object);
3660     Cmp(object, isolate()->factory()->undefined_value());
3661     j(equal, &done_checking);
3662     Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3663     Assert(equal, kExpectedUndefinedOrCell);
3664     bind(&done_checking);
3665   }
3666 }
3667 
3668 
3669 void MacroAssembler::AssertRootValue(Register src,
3670                                      Heap::RootListIndex root_value_index,
3671                                      BailoutReason reason) {
3672   if (emit_debug_code()) {
3673     DCHECK(!src.is(kScratchRegister));
3674     LoadRoot(kScratchRegister, root_value_index);
3675     cmpp(src, kScratchRegister);
3676     Check(equal, reason);
3677   }
3678 }
3679 
3680 
3681 
3682 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3683                                              Register map,
3684                                              Register instance_type) {
3685   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3686   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3687   STATIC_ASSERT(kNotStringTag != 0);
3688   testb(instance_type, Immediate(kIsNotStringMask));
3689   return zero;
3690 }
3691 
3692 
3693 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3694                                            Register map,
3695                                            Register instance_type) {
3696   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3697   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3698   cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3699   return below_equal;
3700 }
3701 
3702 
3703 void MacroAssembler::TryGetFunctionPrototype(Register function,
3704                                              Register result,
3705                                              Label* miss,
3706                                              bool miss_on_bound_function) {
3707   Label non_instance;
3708   if (miss_on_bound_function) {
3709     // Check that the receiver isn't a smi.
3710     testl(function, Immediate(kSmiTagMask));
3711     j(zero, miss);
3712 
3713     // Check that the function really is a function.
3714     CmpObjectType(function, JS_FUNCTION_TYPE, result);
3715     j(not_equal, miss);
3716 
3717     movp(kScratchRegister,
3718          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3719     // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3720     // field).
3721     TestBitSharedFunctionInfoSpecialField(kScratchRegister,
3722         SharedFunctionInfo::kCompilerHintsOffset,
3723         SharedFunctionInfo::kBoundFunction);
3724     j(not_zero, miss);
3725 
3726     // Make sure that the function has an instance prototype.
3727     testb(FieldOperand(result, Map::kBitFieldOffset),
3728           Immediate(1 << Map::kHasNonInstancePrototype));
3729     j(not_zero, &non_instance, Label::kNear);
3730   }
3731 
3732   // Get the prototype or initial map from the function.
3733   movp(result,
3734        FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3735 
3736   // If the prototype or initial map is the hole, don't return it and
3737   // simply miss the cache instead. This will allow us to allocate a
3738   // prototype object on-demand in the runtime system.
3739   CompareRoot(result, Heap::kTheHoleValueRootIndex);
3740   j(equal, miss);
3741 
3742   // If the function does not have an initial map, we're done.
3743   Label done;
3744   CmpObjectType(result, MAP_TYPE, kScratchRegister);
3745   j(not_equal, &done, Label::kNear);
3746 
3747   // Get the prototype from the initial map.
3748   movp(result, FieldOperand(result, Map::kPrototypeOffset));
3749 
3750   if (miss_on_bound_function) {
3751     jmp(&done, Label::kNear);
3752 
3753     // Non-instance prototype: Fetch prototype from constructor field
3754     // in initial map.
3755     bind(&non_instance);
3756     movp(result, FieldOperand(result, Map::kConstructorOffset));
3757   }
3758 
3759   // All done.
3760   bind(&done);
3761 }
3762 
3763 
3764 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3765   if (FLAG_native_code_counters && counter->Enabled()) {
3766     Operand counter_operand = ExternalOperand(ExternalReference(counter));
3767     movl(counter_operand, Immediate(value));
3768   }
3769 }
3770 
3771 
3772 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3773   DCHECK(value > 0);
3774   if (FLAG_native_code_counters && counter->Enabled()) {
3775     Operand counter_operand = ExternalOperand(ExternalReference(counter));
3776     if (value == 1) {
3777       incl(counter_operand);
3778     } else {
3779       addl(counter_operand, Immediate(value));
3780     }
3781   }
3782 }
3783 
3784 
3785 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3786   DCHECK(value > 0);
3787   if (FLAG_native_code_counters && counter->Enabled()) {
3788     Operand counter_operand = ExternalOperand(ExternalReference(counter));
3789     if (value == 1) {
3790       decl(counter_operand);
3791     } else {
3792       subl(counter_operand, Immediate(value));
3793     }
3794   }
3795 }
3796 
3797 
3798 void MacroAssembler::DebugBreak() {
3799   Set(rax, 0);  // No arguments.
3800   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3801   CEntryStub ces(isolate(), 1);
3802   DCHECK(AllowThisStubCall(&ces));
3803   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3804 }
3805 
3806 
3807 void MacroAssembler::InvokeCode(Register code,
3808                                 const ParameterCount& expected,
3809                                 const ParameterCount& actual,
3810                                 InvokeFlag flag,
3811                                 const CallWrapper& call_wrapper) {
3812   // You can't call a function without a valid frame.
3813   DCHECK(flag == JUMP_FUNCTION || has_frame());
3814 
3815   Label done;
3816   bool definitely_mismatches = false;
3817   InvokePrologue(expected,
3818                  actual,
3819                  Handle<Code>::null(),
3820                  code,
3821                  &done,
3822                  &definitely_mismatches,
3823                  flag,
3824                  Label::kNear,
3825                  call_wrapper);
3826   if (!definitely_mismatches) {
3827     if (flag == CALL_FUNCTION) {
3828       call_wrapper.BeforeCall(CallSize(code));
3829       call(code);
3830       call_wrapper.AfterCall();
3831     } else {
3832       DCHECK(flag == JUMP_FUNCTION);
3833       jmp(code);
3834     }
3835     bind(&done);
3836   }
3837 }
3838 
3839 
3840 void MacroAssembler::InvokeFunction(Register function,
3841                                     const ParameterCount& actual,
3842                                     InvokeFlag flag,
3843                                     const CallWrapper& call_wrapper) {
3844   // You can't call a function without a valid frame.
3845   DCHECK(flag == JUMP_FUNCTION || has_frame());
3846 
3847   DCHECK(function.is(rdi));
3848   movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3849   movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3850   LoadSharedFunctionInfoSpecialField(rbx, rdx,
3851       SharedFunctionInfo::kFormalParameterCountOffset);
3852   // Advances rdx to the end of the Code object header, to the start of
3853   // the executable code.
3854   movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3855 
3856   ParameterCount expected(rbx);
3857   InvokeCode(rdx, expected, actual, flag, call_wrapper);
3858 }
3859 
3860 
3861 void MacroAssembler::InvokeFunction(Register function,
3862                                     const ParameterCount& expected,
3863                                     const ParameterCount& actual,
3864                                     InvokeFlag flag,
3865                                     const CallWrapper& call_wrapper) {
3866   // You can't call a function without a valid frame.
3867   DCHECK(flag == JUMP_FUNCTION || has_frame());
3868 
3869   DCHECK(function.is(rdi));
3870   movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3871   // Advances rdx to the end of the Code object header, to the start of
3872   // the executable code.
3873   movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3874 
3875   InvokeCode(rdx, expected, actual, flag, call_wrapper);
3876 }
3877 
3878 
3879 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3880                                     const ParameterCount& expected,
3881                                     const ParameterCount& actual,
3882                                     InvokeFlag flag,
3883                                     const CallWrapper& call_wrapper) {
3884   Move(rdi, function);
3885   InvokeFunction(rdi, expected, actual, flag, call_wrapper);
3886 }
3887 
3888 
3889 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3890                                     const ParameterCount& actual,
3891                                     Handle<Code> code_constant,
3892                                     Register code_register,
3893                                     Label* done,
3894                                     bool* definitely_mismatches,
3895                                     InvokeFlag flag,
3896                                     Label::Distance near_jump,
3897                                     const CallWrapper& call_wrapper) {
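  // Register conventions assumed here and checked by the DCHECKs below:
  // rax holds the actual argument count, rbx the expected count, and rdx
  // the code to invoke after any adaptation.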
3898   bool definitely_matches = false;
3899   *definitely_mismatches = false;
3900   Label invoke;
3901   if (expected.is_immediate()) {
3902     DCHECK(actual.is_immediate());
3903     if (expected.immediate() == actual.immediate()) {
3904       definitely_matches = true;
3905     } else {
3906       Set(rax, actual.immediate());
3907       if (expected.immediate() ==
3908               SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3909         // Don't worry about adapting arguments for built-ins that
3910         // don't want that done. Skip the adaptation code by making it look
3911         // like we have a match between expected and actual number of
3912         // arguments.
3913         definitely_matches = true;
3914       } else {
3915         *definitely_mismatches = true;
3916         Set(rbx, expected.immediate());
3917       }
3918     }
3919   } else {
3920     if (actual.is_immediate()) {
3921       // Expected is in register, actual is immediate. This is the
3922       // case when we invoke function values without going through the
3923       // IC mechanism.
3924       cmpp(expected.reg(), Immediate(actual.immediate()));
3925       j(equal, &invoke, Label::kNear);
3926       DCHECK(expected.reg().is(rbx));
3927       Set(rax, actual.immediate());
3928     } else if (!expected.reg().is(actual.reg())) {
3929       // Both expected and actual are in (different) registers. This
3930       // is the case when we invoke functions using call and apply.
3931       cmpp(expected.reg(), actual.reg());
3932       j(equal, &invoke, Label::kNear);
3933       DCHECK(actual.reg().is(rax));
3934       DCHECK(expected.reg().is(rbx));
3935     }
3936   }
3937 
3938   if (!definitely_matches) {
3939     Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3940     if (!code_constant.is_null()) {
3941       Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3942       addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3943     } else if (!code_register.is(rdx)) {
3944       movp(rdx, code_register);
3945     }
3946 
3947     if (flag == CALL_FUNCTION) {
3948       call_wrapper.BeforeCall(CallSize(adaptor));
3949       Call(adaptor, RelocInfo::CODE_TARGET);
3950       call_wrapper.AfterCall();
3951       if (!*definitely_mismatches) {
3952         jmp(done, near_jump);
3953       }
3954     } else {
3955       Jump(adaptor, RelocInfo::CODE_TARGET);
3956     }
3957     bind(&invoke);
3958   }
3959 }
3960 
3961 
3962 void MacroAssembler::StubPrologue() {
3963   pushq(rbp);  // Caller's frame pointer.
3964   movp(rbp, rsp);
3965   Push(rsi);  // Callee's context.
3966   Push(Smi::FromInt(StackFrame::STUB));
3967 }
3968 
3969 
3970 void MacroAssembler::Prologue(bool code_pre_aging) {
3971   PredictableCodeSizeScope predictable_code_size_scope(this,
3972       kNoCodeAgeSequenceLength);
3973   if (code_pre_aging) {
3974     // Pre-age the code.
3975     Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
3976          RelocInfo::CODE_AGE_SEQUENCE);
3977     Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
3978   } else {
3979     pushq(rbp);  // Caller's frame pointer.
3980     movp(rbp, rsp);
3981     Push(rsi);  // Callee's context.
3982     Push(rdi);  // Callee's JS function.
3983   }
3984 }
3985 
3986 
3987 void MacroAssembler::EnterFrame(StackFrame::Type type) {
3988   pushq(rbp);
3989   movp(rbp, rsp);
3990   Push(rsi);  // Context.
3991   Push(Smi::FromInt(type));
3992   Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3993   Push(kScratchRegister);
3994   if (emit_debug_code()) {
3995     Move(kScratchRegister,
3996          isolate()->factory()->undefined_value(),
3997          RelocInfo::EMBEDDED_OBJECT);
3998     cmpp(Operand(rsp, 0), kScratchRegister);
3999     Check(not_equal, kCodeObjectNotProperlyPatched);
4000   }
4001 }
4002 
4003 
4004 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4005   if (emit_debug_code()) {
4006     Move(kScratchRegister, Smi::FromInt(type));
4007     cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
4008     Check(equal, kStackFrameTypesMustMatch);
4009   }
4010   movp(rsp, rbp);
4011   popq(rbp);
4012 }
4013 
4014 
4015 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
4016   // Set up the frame structure on the stack.
4017   // All constants are relative to the frame pointer of the exit frame.
4018   DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
4019          kFPOnStackSize + kPCOnStackSize);
4020   DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
4021   DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
4022   pushq(rbp);
4023   movp(rbp, rsp);
4024 
4025   // Reserve room for entry stack pointer and push the code object.
4026   DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
4027   Push(Immediate(0));  // Saved entry sp, patched before call.
4028   Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4029   Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
4030 
4031   // Save the frame pointer and the context in top.
4032   if (save_rax) {
4033     movp(r14, rax);  // Back up rax in a callee-saved register.
4034   }
4035 
4036   Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
4037   Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
4038 }
4039 
4040 
4041 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
4042                                             bool save_doubles) {
4043 #ifdef _WIN64
4044   const int kShadowSpace = 4;
4045   arg_stack_space += kShadowSpace;
4046 #endif
4047   // Optionally save all XMM registers.
4048   if (save_doubles) {
4049     int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
4050         arg_stack_space * kRegisterSize;
4051     subp(rsp, Immediate(space));
4052     int offset = -2 * kPointerSize;
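    // The XMM save area sits below the two slots already pushed by
    // EnterExitFramePrologue (the saved entry sp and the code object).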
4053     for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4054       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4055       movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
4056     }
4057   } else if (arg_stack_space > 0) {
4058     subp(rsp, Immediate(arg_stack_space * kRegisterSize));
4059   }
4060 
4061   // Get the required frame alignment for the OS.
4062   const int kFrameAlignment = base::OS::ActivationFrameAlignment();
4063   if (kFrameAlignment > 0) {
4064     DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
4065     DCHECK(is_int8(kFrameAlignment));
4066     andp(rsp, Immediate(-kFrameAlignment));
4067   }
4068 
4069   // Patch the saved entry sp.
4070   movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
4071 }
4072 
4073 
4074 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
4075   EnterExitFramePrologue(true);
4076 
4077   // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
4078   // so it must be retained across the C-call.
4079   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
4080   leap(r15, Operand(rbp, r14, times_pointer_size, offset));
4081 
4082   EnterExitFrameEpilogue(arg_stack_space, save_doubles);
4083 }
4084 
4085 
4086 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
4087   EnterExitFramePrologue(false);
4088   EnterExitFrameEpilogue(arg_stack_space, false);
4089 }
4090 
4091 
4092 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
4093   // Registers:
4094   // r15 : argv
4095   if (save_doubles) {
4096     int offset = -2 * kPointerSize;
4097     for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4098       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4099       movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
4100     }
4101   }
4102   // Get the return address from the stack and restore the frame pointer.
4103   movp(rcx, Operand(rbp, kFPOnStackSize));
4104   movp(rbp, Operand(rbp, 0 * kPointerSize));
4105 
4106   // Drop everything up to and including the arguments and the receiver
4107   // from the caller stack.
4108   leap(rsp, Operand(r15, 1 * kPointerSize));
4109 
4110   PushReturnAddressFrom(rcx);
4111 
4112   LeaveExitFrameEpilogue(true);
4113 }
4114 
4115 
4116 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
4117   movp(rsp, rbp);
4118   popq(rbp);
4119 
4120   LeaveExitFrameEpilogue(restore_context);
4121 }
4122 
4123 
4124 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
4125   // Restore current context from top and clear it in debug mode.
4126   ExternalReference context_address(Isolate::kContextAddress, isolate());
4127   Operand context_operand = ExternalOperand(context_address);
4128   if (restore_context) {
4129     movp(rsi, context_operand);
4130   }
4131 #ifdef DEBUG
4132   movp(context_operand, Immediate(0));
4133 #endif
4134 
4135   // Clear the top frame.
4136   ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4137                                        isolate());
4138   Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4139   movp(c_entry_fp_operand, Immediate(0));
4140 }
4141 
4142 
4143 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4144                                             Register scratch,
4145                                             Label* miss) {
4146   Label same_contexts;
4147 
4148   DCHECK(!holder_reg.is(scratch));
4149   DCHECK(!scratch.is(kScratchRegister));
4150   // Load current lexical context from the stack frame.
4151   movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4152 
4153   // When generating debug code, make sure the lexical context is set.
4154   if (emit_debug_code()) {
4155     cmpp(scratch, Immediate(0));
4156     Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4157   }
4158   // Load the native context of the current context.
4159   int offset =
4160       Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
4161   movp(scratch, FieldOperand(scratch, offset));
4162   movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4163 
4164   // Check the context is a native context.
4165   if (emit_debug_code()) {
4166     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4167         isolate()->factory()->native_context_map());
4168     Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4169   }
4170 
4171   // Check if both contexts are the same.
4172   cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4173   j(equal, &same_contexts);
4174 
4175   // Compare security tokens.
4176   // Check that the security token in the calling global object is
4177   // compatible with the security token in the receiving global
4178   // object.
4179 
4180   // Check the context is a native context.
4181   if (emit_debug_code()) {
4182     // Preserve original value of holder_reg.
4183     Push(holder_reg);
4184     movp(holder_reg,
4185          FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4186     CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4187     Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4188 
4189     // Read the first word and compare it to native_context_map().
4190     movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4191     CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4192     Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4193     Pop(holder_reg);
4194   }
4195 
4196   movp(kScratchRegister,
4197        FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4198   int token_offset =
4199       Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4200   movp(scratch, FieldOperand(scratch, token_offset));
4201   cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4202   j(not_equal, miss);
4203 
4204   bind(&same_contexts);
4205 }
4206 
4207 
4208 // Compute the hash code from the untagged key.  This must be kept in sync with
4209 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
4210 // code-stubs-hydrogen.cc
4211 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4212   // First, load the hash seed into scratch.
4213   LoadRoot(scratch, Heap::kHashSeedRootIndex);
4214   SmiToInteger32(scratch, scratch);
4215 
4216   // XOR the original key with the seed.
4217   xorl(r0, scratch);
4218 
4219   // Compute the hash code from the untagged key.  This must be kept in sync
4220   // with ComputeIntegerHash in utils.h.
4221   //
4222   // hash = ~hash + (hash << 15);
4223   movl(scratch, r0);
4224   notl(r0);
4225   shll(scratch, Immediate(15));
4226   addl(r0, scratch);
4227   // hash = hash ^ (hash >> 12);
4228   movl(scratch, r0);
4229   shrl(scratch, Immediate(12));
4230   xorl(r0, scratch);
4231   // hash = hash + (hash << 2);
4232   leal(r0, Operand(r0, r0, times_4, 0));
4233   // hash = hash ^ (hash >> 4);
4234   movl(scratch, r0);
4235   shrl(scratch, Immediate(4));
4236   xorl(r0, scratch);
4237   // hash = hash * 2057;
4238   imull(r0, r0, Immediate(2057));
4239   // hash = hash ^ (hash >> 16);
4240   movl(scratch, r0);
4241   shrl(scratch, Immediate(16));
4242   xorl(r0, scratch);
4243 }
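
// For reference, the scalar computation emitted above (a sketch that
// mirrors ComputeIntegerHash in utils.h; it is not compiled here):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;
//   hash = hash ^ (hash >> 16);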
4244 
4245 
4246 
4247 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4248                                               Register elements,
4249                                               Register key,
4250                                               Register r0,
4251                                               Register r1,
4252                                               Register r2,
4253                                               Register result) {
4254   // Register use:
4255   //
4256   // elements - holds the slow-case elements of the receiver on entry.
4257   //            Unchanged unless 'result' is the same register.
4258   //
4259   // key      - holds the smi key on entry.
4260   //            Unchanged unless 'result' is the same register.
4261   //
4262   // Scratch registers:
4263   //
4264   // r0 - holds the untagged key on entry and holds the hash once computed.
4265   //
4266   // r1 - used to hold the capacity mask of the dictionary
4267   //
4268   // r2 - used for the index into the dictionary.
4269   //
4270   // result - holds the result on exit if the load succeeded.
4271   //          Allowed to be the same as 'elements' or 'key'.
4272   //          Unchanged on bailout so 'elements' or 'key' can be used
4273   //          in further computation.
4274 
4275   Label done;
4276 
4277   GetNumberHash(r0, r1);
4278 
4279   // Compute capacity mask.
4280   SmiToInteger32(r1, FieldOperand(elements,
4281                                   SeededNumberDictionary::kCapacityOffset));
4282   decl(r1);
4283 
4284   // Generate an unrolled loop that performs a few probes before giving up.
4285   for (int i = 0; i < kNumberDictionaryProbes; i++) {
4286     // Use r2 for index calculations and keep the hash intact in r0.
4287     movp(r2, r0);
4288     // Compute the masked index: (hash + i + i * i) & mask.
4289     if (i > 0) {
4290       addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4291     }
4292     andp(r2, r1);
4293 
4294     // Scale the index by multiplying by the entry size.
4295     DCHECK(SeededNumberDictionary::kEntrySize == 3);
4296     leap(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
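    // Each entry spans three pointers: key, value and property details,
    // matching kValueOffset and kDetailsOffset computed below.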
4297 
4298     // Check if the key matches.
4299     cmpp(key, FieldOperand(elements,
4300                            r2,
4301                            times_pointer_size,
4302                            SeededNumberDictionary::kElementsStartOffset));
4303     if (i != (kNumberDictionaryProbes - 1)) {
4304       j(equal, &done);
4305     } else {
4306       j(not_equal, miss);
4307     }
4308   }
4309 
4310   bind(&done);
4311   // Check that the value is a normal property.
4312   const int kDetailsOffset =
4313       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4314   DCHECK_EQ(NORMAL, 0);
4315   Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4316        Smi::FromInt(PropertyDetails::TypeField::kMask));
4317   j(not_zero, miss);
4318 
4319   // Get the value at the masked, scaled index.
4320   const int kValueOffset =
4321       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4322   movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4323 }
4324 
4325 
4326 void MacroAssembler::LoadAllocationTopHelper(Register result,
4327                                              Register scratch,
4328                                              AllocationFlags flags) {
4329   ExternalReference allocation_top =
4330       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4331 
4332   // Just return if allocation top is already known.
4333   if ((flags & RESULT_CONTAINS_TOP) != 0) {
4334     // No use of scratch if allocation top is provided.
4335     DCHECK(!scratch.is_valid());
4336 #ifdef DEBUG
4337     // Assert that result actually contains top on entry.
4338     Operand top_operand = ExternalOperand(allocation_top);
4339     cmpp(result, top_operand);
4340     Check(equal, kUnexpectedAllocationTop);
4341 #endif
4342     return;
4343   }
4344 
4345   // Move address of new object to result. Use scratch register if available,
4346   // and keep address in scratch until call to UpdateAllocationTopHelper.
4347   if (scratch.is_valid()) {
4348     LoadAddress(scratch, allocation_top);
4349     movp(result, Operand(scratch, 0));
4350   } else {
4351     Load(result, allocation_top);
4352   }
4353 }
4354 
4355 
4356 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4357                                                  Register scratch,
4358                                                  Label* gc_required,
4359                                                  AllocationFlags flags) {
4360   if (kPointerSize == kDoubleSize) {
4361     if (FLAG_debug_code) {
4362       testl(result, Immediate(kDoubleAlignmentMask));
4363       Check(zero, kAllocationIsNotDoubleAligned);
4364     }
4365   } else {
4366     // Align the next allocation. Storing the filler map without checking top
4367     // is safe in new-space because the limit of the heap is aligned there.
4368     DCHECK(kPointerSize * 2 == kDoubleSize);
4369     DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
4370     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4371     // Make sure scratch is not clobbered by this function as it might be
4372     // used in UpdateAllocationTopHelper later.
4373     DCHECK(!scratch.is(kScratchRegister));
4374     Label aligned;
4375     testl(result, Immediate(kDoubleAlignmentMask));
4376     j(zero, &aligned, Label::kNear);
4377     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
4378       ExternalReference allocation_limit =
4379           AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4380       cmpp(result, ExternalOperand(allocation_limit));
4381       j(above_equal, gc_required);
4382     }
4383     LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4384     movp(Operand(result, 0), kScratchRegister);
4385     addp(result, Immediate(kDoubleSize / 2));
4386     bind(&aligned);
4387   }
4388 }
4389 
4390 
4391 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4392                                                Register scratch,
4393                                                AllocationFlags flags) {
4394   if (emit_debug_code()) {
4395     testp(result_end, Immediate(kObjectAlignmentMask));
4396     Check(zero, kUnalignedAllocationInNewSpace);
4397   }
4398 
4399   ExternalReference allocation_top =
4400       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4401 
4402   // Update new top.
4403   if (scratch.is_valid()) {
4404     // Scratch already contains address of allocation top.
4405     movp(Operand(scratch, 0), result_end);
4406   } else {
4407     Store(allocation_top, result_end);
4408   }
4409 }
4410 
4411 
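// Bump-pointer allocation sketch: result = top, top += object_size; if the
// addition carries or the new top exceeds the space limit, control falls
// through to gc_required; otherwise the new top is published and result is
// optionally tagged as a heap object.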
4412 void MacroAssembler::Allocate(int object_size,
4413                               Register result,
4414                               Register result_end,
4415                               Register scratch,
4416                               Label* gc_required,
4417                               AllocationFlags flags) {
4418   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4419   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4420   if (!FLAG_inline_new) {
4421     if (emit_debug_code()) {
4422       // Trash the registers to simulate an allocation failure.
4423       movl(result, Immediate(0x7091));
4424       if (result_end.is_valid()) {
4425         movl(result_end, Immediate(0x7191));
4426       }
4427       if (scratch.is_valid()) {
4428         movl(scratch, Immediate(0x7291));
4429       }
4430     }
4431     jmp(gc_required);
4432     return;
4433   }
4434   DCHECK(!result.is(result_end));
4435 
4436   // Load address of new object into result.
4437   LoadAllocationTopHelper(result, scratch, flags);
4438 
4439   if ((flags & DOUBLE_ALIGNMENT) != 0) {
4440     MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4441   }
4442 
4443   // Calculate new top and bail out if new space is exhausted.
4444   ExternalReference allocation_limit =
4445       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4446 
4447   Register top_reg = result_end.is_valid() ? result_end : result;
4448 
4449   if (!top_reg.is(result)) {
4450     movp(top_reg, result);
4451   }
4452   addp(top_reg, Immediate(object_size));
4453   j(carry, gc_required);
4454   Operand limit_operand = ExternalOperand(allocation_limit);
4455   cmpp(top_reg, limit_operand);
4456   j(above, gc_required);
4457 
4458   // Update allocation top.
4459   UpdateAllocationTopHelper(top_reg, scratch, flags);
4460 
4461   bool tag_result = (flags & TAG_OBJECT) != 0;
4462   if (top_reg.is(result)) {
4463     if (tag_result) {
4464       subp(result, Immediate(object_size - kHeapObjectTag));
4465     } else {
4466       subp(result, Immediate(object_size));
4467     }
4468   } else if (tag_result) {
4469     // Tag the result if requested.
4470     DCHECK(kHeapObjectTag == 1);
4471     incp(result);
4472   }
4473 }
4474 
4475 
4476 void MacroAssembler::Allocate(int header_size,
4477                               ScaleFactor element_size,
4478                               Register element_count,
4479                               Register result,
4480                               Register result_end,
4481                               Register scratch,
4482                               Label* gc_required,
4483                               AllocationFlags flags) {
4484   DCHECK((flags & SIZE_IN_WORDS) == 0);
4485   leap(result_end, Operand(element_count, element_size, header_size));
4486   Allocate(result_end, result, result_end, scratch, gc_required, flags);
4487 }
4488 
4489 
4490 void MacroAssembler::Allocate(Register object_size,
4491                               Register result,
4492                               Register result_end,
4493                               Register scratch,
4494                               Label* gc_required,
4495                               AllocationFlags flags) {
4496   DCHECK((flags & SIZE_IN_WORDS) == 0);
4497   if (!FLAG_inline_new) {
4498     if (emit_debug_code()) {
4499       // Trash the registers to simulate an allocation failure.
4500       movl(result, Immediate(0x7091));
4501       movl(result_end, Immediate(0x7191));
4502       if (scratch.is_valid()) {
4503         movl(scratch, Immediate(0x7291));
4504       }
4505       // object_size is left unchanged by this function.
4506     }
4507     jmp(gc_required);
4508     return;
4509   }
4510   DCHECK(!result.is(result_end));
4511 
4512   // Load address of new object into result.
4513   LoadAllocationTopHelper(result, scratch, flags);
4514 
4515   if ((flags & DOUBLE_ALIGNMENT) != 0) {
4516     MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4517   }
4518 
4519   // Calculate new top and bail out if new space is exhausted.
4520   ExternalReference allocation_limit =
4521       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4522   if (!object_size.is(result_end)) {
4523     movp(result_end, object_size);
4524   }
4525   addp(result_end, result);
4526   j(carry, gc_required);
4527   Operand limit_operand = ExternalOperand(allocation_limit);
4528   cmpp(result_end, limit_operand);
4529   j(above, gc_required);
4530 
4531   // Update allocation top.
4532   UpdateAllocationTopHelper(result_end, scratch, flags);
4533 
4534   // Tag the result if requested.
4535   if ((flags & TAG_OBJECT) != 0) {
4536     addp(result, Immediate(kHeapObjectTag));
4537   }
4538 }
4539 
4540 
4541 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
4542   ExternalReference new_space_allocation_top =
4543       ExternalReference::new_space_allocation_top_address(isolate());
4544 
4545   // Make sure the object has no tag before resetting top.
4546   andp(object, Immediate(~kHeapObjectTagMask));
4547   Operand top_operand = ExternalOperand(new_space_allocation_top);
4548 #ifdef DEBUG
4549   cmpp(object, top_operand);
4550   Check(below, kUndoAllocationOfNonAllocatedMemory);
4551 #endif
4552   movp(top_operand, object);
4553 }
4554 
4555 
4556 void MacroAssembler::AllocateHeapNumber(Register result,
4557                                         Register scratch,
4558                                         Label* gc_required,
4559                                         MutableMode mode) {
4560   // Allocate heap number in new space.
4561   Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4562 
4563   Heap::RootListIndex map_index = mode == MUTABLE
4564       ? Heap::kMutableHeapNumberMapRootIndex
4565       : Heap::kHeapNumberMapRootIndex;
4566 
4567   // Set the map.
4568   LoadRoot(kScratchRegister, map_index);
4569   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4570 }
4571 
4572 
4573 void MacroAssembler::AllocateTwoByteString(Register result,
4574                                            Register length,
4575                                            Register scratch1,
4576                                            Register scratch2,
4577                                            Register scratch3,
4578                                            Label* gc_required) {
4579   // Calculate the number of bytes needed for the characters in the string while
4580   // observing object alignment.
4581   const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4582                                kObjectAlignmentMask;
4583   DCHECK(kShortSize == 2);
4584   // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
4585   leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4586                 kHeaderAlignment));
4587   andp(scratch1, Immediate(~kObjectAlignmentMask));
4588   if (kHeaderAlignment > 0) {
4589     subp(scratch1, Immediate(kHeaderAlignment));
4590   }
4591 
4592   // Allocate two byte string in new space.
4593   Allocate(SeqTwoByteString::kHeaderSize,
4594            times_1,
4595            scratch1,
4596            result,
4597            scratch2,
4598            scratch3,
4599            gc_required,
4600            TAG_OBJECT);
4601 
4602   // Set the map, length and hash field.
4603   LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4604   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4605   Integer32ToSmi(scratch1, length);
4606   movp(FieldOperand(result, String::kLengthOffset), scratch1);
4607   movp(FieldOperand(result, String::kHashFieldOffset),
4608        Immediate(String::kEmptyHashField));
4609 }
4610 
4611 
4612 void MacroAssembler::AllocateOneByteString(Register result, Register length,
4613                                            Register scratch1, Register scratch2,
4614                                            Register scratch3,
4615                                            Label* gc_required) {
4616   // Calculate the number of bytes needed for the characters in the string while
4617   // observing object alignment.
4618   const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4619                                kObjectAlignmentMask;
4620   movl(scratch1, length);
4621   DCHECK(kCharSize == 1);
4622   addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4623   andp(scratch1, Immediate(~kObjectAlignmentMask));
4624   if (kHeaderAlignment > 0) {
4625     subp(scratch1, Immediate(kHeaderAlignment));
4626   }
4627 
4628   // Allocate one-byte string in new space.
4629   Allocate(SeqOneByteString::kHeaderSize,
4630            times_1,
4631            scratch1,
4632            result,
4633            scratch2,
4634            scratch3,
4635            gc_required,
4636            TAG_OBJECT);
4637 
4638   // Set the map, length and hash field.
4639   LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
4640   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4641   Integer32ToSmi(scratch1, length);
4642   movp(FieldOperand(result, String::kLengthOffset), scratch1);
4643   movp(FieldOperand(result, String::kHashFieldOffset),
4644        Immediate(String::kEmptyHashField));
4645 }
4646 
4647 
4648 void MacroAssembler::AllocateTwoByteConsString(Register result,
4649                                         Register scratch1,
4650                                         Register scratch2,
4651                                         Label* gc_required) {
4652   // Allocate cons string in new space.
4653   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4654            TAG_OBJECT);
4655 
4656   // Set the map. The other fields are left uninitialized.
4657   LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4658   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4659 }
4660 
4661 
4662 void MacroAssembler::AllocateOneByteConsString(Register result,
4663                                                Register scratch1,
4664                                                Register scratch2,
4665                                                Label* gc_required) {
4666   Allocate(ConsString::kSize,
4667            result,
4668            scratch1,
4669            scratch2,
4670            gc_required,
4671            TAG_OBJECT);
4672 
4673   // Set the map. The other fields are left uninitialized.
4674   LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
4675   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4676 }
4677 
4678 
4679 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4680                                           Register scratch1,
4681                                           Register scratch2,
4682                                           Label* gc_required) {
4683   // Allocate sliced string in new space.
4684   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4685            TAG_OBJECT);
4686 
4687   // Set the map. The other fields are left uninitialized.
4688   LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4689   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4690 }
4691 
4692 
4693 void MacroAssembler::AllocateOneByteSlicedString(Register result,
4694                                                  Register scratch1,
4695                                                  Register scratch2,
4696                                                  Label* gc_required) {
4697   // Allocate sliced string in new space.
4698   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4699            TAG_OBJECT);
4700 
4701   // Set the map. The other fields are left uninitialized.
4702   LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
4703   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4704 }
4705 
4706 
4707 // Copy memory, byte-by-byte, from source to destination.  Not optimized for
4708 // long or aligned copies.
4709 // Destination is incremented by length; source, length, and scratch are
4710 // clobbered.
4711 // A simpler loop is faster on small copies, but slower on large ones.
4712 // The cld() instruction must have been emitted, to clear the direction flag,
4713 // before calling this function.
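// Example (assuming kPointerSize == 8): for length == 26 the code takes the
// len24 path and performs quadword moves at offsets 16, 8 and 0, followed by
// one overlapping quadword move covering bytes 18..25, so all 26 bytes are
// copied without entering the byte loop.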
4714 void MacroAssembler::CopyBytes(Register destination,
4715                                Register source,
4716                                Register length,
4717                                int min_length,
4718                                Register scratch) {
4719   DCHECK(min_length >= 0);
4720   if (emit_debug_code()) {
4721     cmpl(length, Immediate(min_length));
4722     Assert(greater_equal, kInvalidMinLength);
4723   }
4724   Label short_loop, len8, len16, len24, done, short_string;
4725 
4726   const int kLongStringLimit = 4 * kPointerSize;
4727   if (min_length <= kLongStringLimit) {
4728     cmpl(length, Immediate(kPointerSize));
4729     j(below, &short_string, Label::kNear);
4730   }
4731 
4732   DCHECK(source.is(rsi));
4733   DCHECK(destination.is(rdi));
4734   DCHECK(length.is(rcx));
4735 
4736   if (min_length <= kLongStringLimit) {
4737     cmpl(length, Immediate(2 * kPointerSize));
4738     j(below_equal, &len8, Label::kNear);
4739     cmpl(length, Immediate(3 * kPointerSize));
4740     j(below_equal, &len16, Label::kNear);
4741     cmpl(length, Immediate(4 * kPointerSize));
4742     j(below_equal, &len24, Label::kNear);
4743   }
4744 
4745   // Because source is 8-byte aligned in our uses of this function,
4746   // we keep source aligned for the rep movs operation by copying the odd bytes
4747   // at the end of the ranges.
4748   movp(scratch, length);
4749   shrl(length, Immediate(kPointerSizeLog2));
4750   repmovsp();
4751   // Move remaining bytes of length.
4752   andl(scratch, Immediate(kPointerSize - 1));
4753   movp(length, Operand(source, scratch, times_1, -kPointerSize));
4754   movp(Operand(destination, scratch, times_1, -kPointerSize), length);
4755   addp(destination, scratch);
4756 
4757   if (min_length <= kLongStringLimit) {
4758     jmp(&done, Label::kNear);
4759     bind(&len24);
4760     movp(scratch, Operand(source, 2 * kPointerSize));
4761     movp(Operand(destination, 2 * kPointerSize), scratch);
4762     bind(&len16);
4763     movp(scratch, Operand(source, kPointerSize));
4764     movp(Operand(destination, kPointerSize), scratch);
4765     bind(&len8);
4766     movp(scratch, Operand(source, 0));
4767     movp(Operand(destination, 0), scratch);
4768     // Move remaining bytes of length.
4769     movp(scratch, Operand(source, length, times_1, -kPointerSize));
4770     movp(Operand(destination, length, times_1, -kPointerSize), scratch);
4771     addp(destination, length);
4772     jmp(&done, Label::kNear);
4773 
4774     bind(&short_string);
4775     if (min_length == 0) {
4776       testl(length, length);
4777       j(zero, &done, Label::kNear);
4778     }
4779 
4780     bind(&short_loop);
4781     movb(scratch, Operand(source, 0));
4782     movb(Operand(destination, 0), scratch);
4783     incp(source);
4784     incp(destination);
4785     decl(length);
4786     j(not_zero, &short_loop);
4787   }
4788 
4789   bind(&done);
4790 }
4791 
4792 
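// Equivalent C sketch of the filler loop below:
//   for (Address p = start_offset; p < end_offset; p += kPointerSize) {
//     *reinterpret_cast<Object**>(p) = filler;
//   }
// On exit start_offset has been advanced to (at least) end_offset.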
4793 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4794                                                 Register end_offset,
4795                                                 Register filler) {
4796   Label loop, entry;
4797   jmp(&entry);
4798   bind(&loop);
4799   movp(Operand(start_offset, 0), filler);
4800   addp(start_offset, Immediate(kPointerSize));
4801   bind(&entry);
4802   cmpp(start_offset, end_offset);
4803   j(less, &loop);
4804 }
4805 
4806 
4807 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4808   if (context_chain_length > 0) {
4809     // Move up the chain of contexts to the context containing the slot.
4810     movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4811     for (int i = 1; i < context_chain_length; i++) {
4812       movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4813     }
4814   } else {
4815     // Slot is in the current function context.  Move it into the
4816     // destination register in case we store into it (the write barrier
4817     // cannot be allowed to destroy the context in rsi).
4818     movp(dst, rsi);
4819   }
4820 
4821   // We should not have found a with context by walking the context
4822   // chain (i.e., the static scope chain and runtime context chain do
4823   // not agree).  A variable occurring in such a scope should have
4824   // slot type LOOKUP and not CONTEXT.
4825   if (emit_debug_code()) {
4826     CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4827                 Heap::kWithContextMapRootIndex);
4828     Check(not_equal, kVariableResolvedToWithContext);
4829   }
4830 }
4831 
4832 
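// Usage sketch with example kinds (the kinds are illustrative, not implied by
// this function): with expected_kind == FAST_SMI_ELEMENTS and
// transitioned_kind == FAST_ELEMENTS, map_in_out is replaced by the cached
// FAST_ELEMENTS array map iff it currently holds the cached FAST_SMI_ELEMENTS
// map; otherwise control jumps to no_map_match.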
4833 void MacroAssembler::LoadTransitionedArrayMapConditional(
4834     ElementsKind expected_kind,
4835     ElementsKind transitioned_kind,
4836     Register map_in_out,
4837     Register scratch,
4838     Label* no_map_match) {
4839   // Load the global or builtins object from the current context.
4840   movp(scratch,
4841        Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4842   movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4843 
4844   // Check that map_in_out matches the expected cached array map.
4845   movp(scratch, Operand(scratch,
4846                         Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4847 
4848   int offset = expected_kind * kPointerSize +
4849       FixedArrayBase::kHeaderSize;
4850   cmpp(map_in_out, FieldOperand(scratch, offset));
4851   j(not_equal, no_map_match);
4852 
4853   // Use the transitioned cached map.
4854   offset = transitioned_kind * kPointerSize +
4855       FixedArrayBase::kHeaderSize;
4856   movp(map_in_out, FieldOperand(scratch, offset));
4857 }
4858 
4859 
4860 #ifdef _WIN64
4861 static const int kRegisterPassedArguments = 4;
4862 #else
4863 static const int kRegisterPassedArguments = 6;
4864 #endif
4865 
4866 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4867   // Load the global or builtins object from the current context.
4868   movp(function,
4869        Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4870   // Load the native context from the global or builtins object.
4871   movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
4872   // Load the function from the native context.
4873   movp(function, Operand(function, Context::SlotOffset(index)));
4874 }
4875 
4876 
4877 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4878                                                   Register map) {
4879   // Load the initial map.  The global functions all have initial maps.
4880   movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4881   if (emit_debug_code()) {
4882     Label ok, fail;
4883     CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4884     jmp(&ok);
4885     bind(&fail);
4886     Abort(kGlobalFunctionsMustHaveInitialMap);
4887     bind(&ok);
4888   }
4889 }
4890 
4891 
4892 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4893   // On Windows 64, stack slots are reserved by the caller for all arguments
4894   // including the ones passed in registers, and space is always allocated for
4895   // the four register arguments even if the function takes fewer than four
4896   // arguments.
4897   // On the AMD64 ABI (Linux/Mac) the first six arguments are passed in
4898   // registers and the caller does not reserve stack slots for them.
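  // Examples: num_arguments == 2 gives 4 slots on Windows (the minimum
  // shadow space) and 0 slots elsewhere; num_arguments == 7 gives 7 slots
  // on Windows and 1 slot elsewhere.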
4899   DCHECK(num_arguments >= 0);
4900 #ifdef _WIN64
4901   const int kMinimumStackSlots = kRegisterPassedArguments;
4902   if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4903   return num_arguments;
4904 #else
4905   if (num_arguments < kRegisterPassedArguments) return 0;
4906   return num_arguments - kRegisterPassedArguments;
4907 #endif
4908 }
4909 
4910 
4911 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
4912                                                Register index,
4913                                                Register value,
4914                                                uint32_t encoding_mask) {
4915   Label is_object;
4916   JumpIfNotSmi(string, &is_object);
4917   Abort(kNonObject);
4918   bind(&is_object);
4919 
4920   Push(value);
4921   movp(value, FieldOperand(string, HeapObject::kMapOffset));
4922   movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
4923 
4924   andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
4925   cmpp(value, Immediate(encoding_mask));
4926   Pop(value);
4927   Check(equal, kUnexpectedStringType);
4928 
4929   // The index is assumed to be untagged coming in; tag it to compare with the
4930   // string length without using a temp register. It is restored at the end of
4931   // this function.
4932   Integer32ToSmi(index, index);
4933   SmiCompare(index, FieldOperand(string, String::kLengthOffset));
4934   Check(less, kIndexIsTooLarge);
4935 
4936   SmiCompare(index, Smi::FromInt(0));
4937   Check(greater_equal, kIndexIsNegative);
4938 
4939   // Restore the index
4940   SmiToInteger32(index, index);
4941 }
4942 
4943 
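// Stack layout sketch after PrepareCallCFunction(n), assuming kRegisterSize
// == 8 and a 16-byte activation frame alignment:
//   [rsp + slots * 8]        : the original rsp, restored by CallCFunction
//   [rsp .. rsp + slots * 8) : argument slots, where
//                              slots == ArgumentStackSlotsForCFunctionCall(n)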
4944 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4945   int frame_alignment = base::OS::ActivationFrameAlignment();
4946   DCHECK(frame_alignment != 0);
4947   DCHECK(num_arguments >= 0);
4948 
4949   // Make stack end at alignment and allocate space for arguments and old rsp.
4950   movp(kScratchRegister, rsp);
4951   DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4952   int argument_slots_on_stack =
4953       ArgumentStackSlotsForCFunctionCall(num_arguments);
4954   subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
4955   andp(rsp, Immediate(-frame_alignment));
4956   movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
4957 }
4958 
4959 
4960 void MacroAssembler::CallCFunction(ExternalReference function,
4961                                    int num_arguments) {
4962   LoadAddress(rax, function);
4963   CallCFunction(rax, num_arguments);
4964 }
4965 
4966 
4967 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
4968   DCHECK(has_frame());
4969   // Check stack alignment.
4970   if (emit_debug_code()) {
4971     CheckStackAlignment();
4972   }
4973 
4974   call(function);
4975   DCHECK(base::OS::ActivationFrameAlignment() != 0);
4976   DCHECK(num_arguments >= 0);
4977   int argument_slots_on_stack =
4978       ArgumentStackSlotsForCFunctionCall(num_arguments);
4979   movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
4980 }
4981 
4982 
4983 #ifdef DEBUG
4984 bool AreAliased(Register reg1,
4985                 Register reg2,
4986                 Register reg3,
4987                 Register reg4,
4988                 Register reg5,
4989                 Register reg6,
4990                 Register reg7,
4991                 Register reg8) {
4992   int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
4993       reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4994       reg7.is_valid() + reg8.is_valid();
4995 
4996   RegList regs = 0;
4997   if (reg1.is_valid()) regs |= reg1.bit();
4998   if (reg2.is_valid()) regs |= reg2.bit();
4999   if (reg3.is_valid()) regs |= reg3.bit();
5000   if (reg4.is_valid()) regs |= reg4.bit();
5001   if (reg5.is_valid()) regs |= reg5.bit();
5002   if (reg6.is_valid()) regs |= reg6.bit();
5003   if (reg7.is_valid()) regs |= reg7.bit();
5004   if (reg8.is_valid()) regs |= reg8.bit();
5005   int n_of_non_aliasing_regs = NumRegs(regs);
5006 
5007   return n_of_valid_regs != n_of_non_aliasing_regs;
5008 }
5009 #endif
5010 
5011 
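// Usage sketch (hypothetical patch site): overwrite two bytes at `address`
// with breakpoints.
//   CodePatcher patcher(address, 2);
//   patcher.masm()->int3();
//   patcher.masm()->int3();
// The destructor then flushes the instruction cache and, in debug builds,
// verifies that exactly `size` bytes were emitted.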
5012 CodePatcher::CodePatcher(byte* address, int size)
5013     : address_(address),
5014       size_(size),
5015       masm_(NULL, address, size + Assembler::kGap) {
5016   // Create a new macro assembler pointing to the address of the code to patch.
5017   // The size is adjusted with kGap in order for the assembler to generate size
5018   // bytes of instructions without failing with buffer size constraints.
5019   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5020 }
5021 
5022 
5023 CodePatcher::~CodePatcher() {
5024   // Indicate that code has changed.
5025   CpuFeatures::FlushICache(address_, size_);
5026 
5027   // Check that the code was patched as expected.
5028   DCHECK(masm_.pc_ == address_ + size_);
5029   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5030 }
5031 
5032 
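// The MemoryChunk flags live in the header at the start of each page, so
// clearing the Page::kPageAlignmentMask bits of any address inside the page
// yields the address of that header.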
5033 void MacroAssembler::CheckPageFlag(
5034     Register object,
5035     Register scratch,
5036     int mask,
5037     Condition cc,
5038     Label* condition_met,
5039     Label::Distance condition_met_distance) {
5040   DCHECK(cc == zero || cc == not_zero);
5041   if (scratch.is(object)) {
5042     andp(scratch, Immediate(~Page::kPageAlignmentMask));
5043   } else {
5044     movp(scratch, Immediate(~Page::kPageAlignmentMask));
5045     andp(scratch, object);
5046   }
5047   if (mask < (1 << kBitsPerByte)) {
5048     testb(Operand(scratch, MemoryChunk::kFlagsOffset),
5049           Immediate(static_cast<uint8_t>(mask)));
5050   } else {
5051     testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
5052   }
5053   j(cc, condition_met, condition_met_distance);
5054 }
5055 
5056 
5057 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5058                                         Register scratch,
5059                                         Label* if_deprecated) {
5060   if (map->CanBeDeprecated()) {
5061     Move(scratch, map);
5062     movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
5063     andl(scratch, Immediate(Map::Deprecated::kMask));
5064     j(not_zero, if_deprecated);
5065   }
5066 }
5067 
5068 
5069 void MacroAssembler::JumpIfBlack(Register object,
5070                                  Register bitmap_scratch,
5071                                  Register mask_scratch,
5072                                  Label* on_black,
5073                                  Label::Distance on_black_distance) {
5074   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
5075   GetMarkBits(object, bitmap_scratch, mask_scratch);
5076 
5077   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5078   // The mask_scratch register contains a 1 at the position of the first bit
5079   // and a 0 at all other positions, including the position of the second bit.
5080   movp(rcx, mask_scratch);
5081   // Make rcx into a mask that covers both marking bits using the operation
5082   // rcx = mask | (mask << 1).
5083   leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
5084   // Note that we are using a 4-byte aligned 8-byte load.
5085   andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
5086   cmpp(mask_scratch, rcx);
5087   j(equal, on_black, on_black_distance);
5088 }
5089 
5090 
5091 // Detect some, but not all, common pointer-free objects.  This is used by the
5092 // incremental write barrier which doesn't care about oddballs (they are always
5093 // marked black immediately so this code is not hit).
5094 void MacroAssembler::JumpIfDataObject(
5095     Register value,
5096     Register scratch,
5097     Label* not_data_object,
5098     Label::Distance not_data_object_distance) {
5099   Label is_data_object;
5100   movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
5101   CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5102   j(equal, &is_data_object, Label::kNear);
5103   DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5104   DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5105   // If it's a string and it's not a cons string then it's an object containing
5106   // no GC pointers.
5107   testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
5108         Immediate(kIsIndirectStringMask | kIsNotStringMask));
5109   j(not_zero, not_data_object, not_data_object_distance);
5110   bind(&is_data_object);
5111 }
5112 
5113 
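// Worked example, assuming the usual x64 constants (kPointerSizeLog2 == 3,
// Bitmap::kBitsPerCellLog2 == 5, Bitmap::kBytesPerCellLog2 == 2): for an
// address at page offset 0x1240, the word index is 0x1240 >> 3 == 584, the
// bitmap cell has byte offset (0x1240 >> 6) & ~3 == 72 (cell 18), and
// mask_reg becomes 1 << (584 & 31) == 1 << 8.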
5114 void MacroAssembler::GetMarkBits(Register addr_reg,
5115                                  Register bitmap_reg,
5116                                  Register mask_reg) {
5117   DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
5118   movp(bitmap_reg, addr_reg);
5119   // Sign-extended 32-bit immediate.
5120   andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
5121   movp(rcx, addr_reg);
5122   int shift =
5123       Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
5124   shrl(rcx, Immediate(shift));
5125   andp(rcx,
5126        Immediate((Page::kPageAlignmentMask >> shift) &
5127                  ~(Bitmap::kBytesPerCell - 1)));
5128 
5129   addp(bitmap_reg, rcx);
5130   movp(rcx, addr_reg);
5131   shrl(rcx, Immediate(kPointerSizeLog2));
5132   andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
5133   movl(mask_reg, Immediate(1));
5134   shlp_cl(mask_reg);
5135 }
5136 
5137 
5138 void MacroAssembler::EnsureNotWhite(
5139     Register value,
5140     Register bitmap_scratch,
5141     Register mask_scratch,
5142     Label* value_is_white_and_not_data,
5143     Label::Distance distance) {
5144   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
5145   GetMarkBits(value, bitmap_scratch, mask_scratch);
5146 
5147   // If the value is black or grey we don't need to do anything.
5148   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5149   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5150   DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5151   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5152 
5153   Label done;
5154 
5155   // Since both black and grey have a 1 in the first position and white does
5156   // not have a 1 there, we only need to check one bit.
5157   testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5158   j(not_zero, &done, Label::kNear);
5159 
5160   if (emit_debug_code()) {
5161     // Check for impossible bit pattern.
5162     Label ok;
5163     Push(mask_scratch);
5164     // shl.  May overflow, making the check conservative.
5165     addp(mask_scratch, mask_scratch);
5166     testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5167     j(zero, &ok, Label::kNear);
5168     int3();
5169     bind(&ok);
5170     Pop(mask_scratch);
5171   }
5172 
5173   // Value is white.  We check whether it is data that doesn't need scanning.
5174   // Currently only checks for HeapNumber and non-cons strings.
5175   Register map = rcx;  // Holds map while checking type.
5176   Register length = rcx;  // Holds length of object after checking type.
5177   Label not_heap_number;
5178   Label is_data_object;
5179 
5180   // Check for heap-number
5181   movp(map, FieldOperand(value, HeapObject::kMapOffset));
5182   CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5183   j(not_equal, &not_heap_number, Label::kNear);
5184   movp(length, Immediate(HeapNumber::kSize));
5185   jmp(&is_data_object, Label::kNear);
5186 
5187   bind(&not_heap_number);
5188   // Check for strings.
5189   DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5190   DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5191   // If it's a string and it's not a cons string then it's an object containing
5192   // no GC pointers.
5193   Register instance_type = rcx;
5194   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
5195   testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
5196   j(not_zero, value_is_white_and_not_data);
5197   // It's a non-indirect (non-cons and non-slice) string.
5198   // If it's external, the length is just ExternalString::kSize.
5199   // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5200   Label not_external;
5201   // External strings are the only ones with the kExternalStringTag bit
5202   // set.
5203   DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5204   DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5205   testb(instance_type, Immediate(kExternalStringTag));
5206   j(zero, &not_external, Label::kNear);
5207   movp(length, Immediate(ExternalString::kSize));
5208   jmp(&is_data_object, Label::kNear);
5209 
5210   bind(&not_external);
5211   // Sequential string, either Latin1 or UC16.
5212   DCHECK(kOneByteStringTag == 0x04);
5213   andp(length, Immediate(kStringEncodingMask));
5214   xorp(length, Immediate(kStringEncodingMask));
5215   addp(length, Immediate(0x04));
5216   // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
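  // For example, with the x64 Smi layout (kSmiTagSize == 1, kSmiShiftSize ==
  // 31), a 3-character one-byte string computes 4 * Smi(3) >> 34 == 3 in the
  // two instructions below, and the header-plus-alignment rounding turns that
  // into the object size.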
5217   imulp(length, FieldOperand(value, String::kLengthOffset));
5218   shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
5219   addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
5220   andp(length, Immediate(~kObjectAlignmentMask));
5221 
5222   bind(&is_data_object);
5223   // Value is a data object, and it is white.  Mark it black.  Since we know
5224   // that the object is white we can make it black by flipping one bit.
5225   orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5226 
5227   andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
5228   addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
5229 
5230   bind(&done);
5231 }
5232 
5233 
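// Walks the prototype chain starting from the object in rax: the receiver's
// map must have a valid (non-sentinel) enum cache length, every map further
// up the chain must have an empty cache, and every object's elements must be
// the empty fixed array (or the empty slow element dictionary); any other
// state jumps to call_runtime.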
5234 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5235   Label next, start;
5236   Register empty_fixed_array_value = r8;
5237   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5238   movp(rcx, rax);
5239 
5240   // Check if the enum length field is properly initialized, indicating that
5241   // there is an enum cache.
5242   movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5243 
5244   EnumLength(rdx, rbx);
5245   Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
5246   j(equal, call_runtime);
5247 
5248   jmp(&start);
5249 
5250   bind(&next);
5251 
5252   movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5253 
5254   // For all objects but the receiver, check that the cache is empty.
5255   EnumLength(rdx, rbx);
5256   Cmp(rdx, Smi::FromInt(0));
5257   j(not_equal, call_runtime);
5258 
5259   bind(&start);
5260 
5261   // Check that there are no elements. Register rcx contains the current JS
5262   // object we've reached through the prototype chain.
5263   Label no_elements;
5264   cmpp(empty_fixed_array_value,
5265        FieldOperand(rcx, JSObject::kElementsOffset));
5266   j(equal, &no_elements);
5267 
5268   // Second chance: the object may be using the empty slow element dictionary.
5269   LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
5270   cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
5271   j(not_equal, call_runtime);
5272 
5273   bind(&no_elements);
5274   movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
5275   cmpp(rcx, null_value);
5276   j(not_equal, &next);
5277 }
5278 
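// An AllocationMemento, if present, sits immediately behind the JSArray in
// memory. Compute that candidate address, bail out to no_memento_found when
// it lies outside the current new-space allocation range, and otherwise leave
// the memento map comparison in the flags for the caller to branch on.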
5279 void MacroAssembler::TestJSArrayForAllocationMemento(
5280     Register receiver_reg,
5281     Register scratch_reg,
5282     Label* no_memento_found) {
5283   ExternalReference new_space_start =
5284       ExternalReference::new_space_start(isolate());
5285   ExternalReference new_space_allocation_top =
5286       ExternalReference::new_space_allocation_top_address(isolate());
5287 
5288   leap(scratch_reg, Operand(receiver_reg,
5289       JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5290   Move(kScratchRegister, new_space_start);
5291   cmpp(scratch_reg, kScratchRegister);
5292   j(less, no_memento_found);
5293   cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
5294   j(greater, no_memento_found);
5295   CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
5296               Heap::kAllocationMementoMapRootIndex);
5297 }
5298 
5299 
5300 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5301     Register object,
5302     Register scratch0,
5303     Register scratch1,
5304     Label* found) {
5305   DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
5306   DCHECK(!scratch1.is(scratch0));
5307   Register current = scratch0;
5308   Label loop_again;
5309 
5310   movp(current, object);
5311 
5312   // Loop based on the map going up the prototype chain.
5313   bind(&loop_again);
5314   movp(current, FieldOperand(current, HeapObject::kMapOffset));
5315   movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
5316   DecodeField<Map::ElementsKindBits>(scratch1);
5317   cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
5318   j(equal, found);
5319   movp(current, FieldOperand(current, Map::kPrototypeOffset));
5320   CompareRoot(current, Heap::kNullValueRootIndex);
5321   j(not_equal, &loop_again);
5322 }
5323 
5324 
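// Worked example (magic constants as in Hacker's Delight, Table 10-1): for
// divisor == 7 the multiplier is 0x92492493 with shift 2. Since that
// multiplier's sign bit is set and the divisor is positive, the dividend is
// added back after the high-half multiply, the result is shifted right by 2,
// and the dividend's sign bit is added to round the quotient toward zero.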
5325 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
5326   DCHECK(!dividend.is(rax));
5327   DCHECK(!dividend.is(rdx));
5328   base::MagicNumbersForDivision<uint32_t> mag =
5329       base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5330   movl(rax, Immediate(mag.multiplier));
5331   imull(dividend);
5332   bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5333   if (divisor > 0 && neg) addl(rdx, dividend);
5334   if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
5335   if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
5336   movl(rax, dividend);
5337   shrl(rax, Immediate(31));
5338   addl(rdx, rax);
5339 }
5340 
5341 
5342 } }  // namespace v8::internal
5343 
5344 #endif  // V8_TARGET_ARCH_X64
5345