// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/x64/codegen-x64.h"

#if V8_TARGET_ARCH_X64

#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}
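// Usage sketch: a stub without its own frame wraps runtime calls in this
// helper so frame setup and the MacroAssembler's has_frame() bookkeeping
// stay in sync (Runtime::kFoo is a placeholder):
//
//   StubRuntimeCallHelper helper;
//   helper.BeforeCall(masm);              // EnterFrame + set_has_frame(true)
//   masm->CallRuntime(Runtime::kFoo, 1);  // requires a frame
//   helper.AfterCall(masm);              // LeaveFrame + set_has_frame(false)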


#define __ masm.


UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // xmm0: raw double input.
  XMMRegister input = xmm0;
  XMMRegister result = xmm1;
  __ pushq(rax);
  __ pushq(rbx);

  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);

  __ popq(rbx);
  __ popq(rax);
  __ Movsd(xmm0, result);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
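// Usage sketch, assuming the UnaryMathFunctionWithIsolate typedef
// (double (*)(double x, Isolate* isolate)) from codegen.h; callers keep a
// libc fallback since the buffer allocation above can fail:
//
//   UnaryMathFunctionWithIsolate fast_exp = CreateExpFunction(isolate);
//   double y = (fast_exp != nullptr) ? fast_exp(x, isolate) : std::exp(x);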


UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // xmm0: raw double input; take its square root in place.
  __ Sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
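// The generated stub is just sqrtsd + ret, i.e. roughly equivalent to
//
//   double fast_sqrt(double x, Isolate* isolate) { return std::sqrt(x); }
//
// computed in a single SSE2 instruction with no library call.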

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // Return address is on the stack.
  Register scratch = rdi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
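// Note on the write barrier above: the map word is a heap pointer field, so
// the store must be paired with RecordWriteField. EMIT_REMEMBERED_SET keeps
// the GC's remembered set up to date, and OMIT_SMI_CHECK is safe because a
// map pointer is never a smi.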


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  // Note: fail is taken from the allocation-memento check and from
  // Allocate() in the new-backing-store path below.
  Label allocated, new_backing_store, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  if (kPointerSize == kDoubleSize) {
    // Check backing store for COW-ness. For COW arrays we have to
    // allocate a new backing store.
    __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &new_backing_store);
  } else {
    // For the x32 port we have to allocate a new backing store, as SMI size
    // is not equal to double size.
    DCHECK(kDoubleSize == 2 * kPointerSize);
    __ jmp(&new_backing_store);
  }

  // Check if the backing store is in new-space. If not, we need to allocate
  // a new one since the old one is in pointer-space.
  // If in new space, we can reuse the old backing store because it is
  // the same size.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movp(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set backing store's map
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs.  The Array's length
  // remains unchanged.
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);

  Label loop, entry, convert_hole;
  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  // r15: the-hole NaN
  __ jmp(&entry);
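// Note: kHoleNanInt64 is the NaN bit pattern the VM reserves to mean "hole"
// in unboxed double arrays. Keeping it in a GP register lets the loop below
// store it with an integer movq, preserving the exact bit pattern that
// distinguishes the hole from ordinary NaNs.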

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
  // Set backing store's map
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set receiver's backing store.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movp(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

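// The conversion loop below uses the backwards-count idiom shared by the
// other generators in this file: jump straight to &entry, decrement the
// index, and loop on not_sign, so the body runs for indices length-1 down
// to 0 and the loop exits when the index reaches -1. In C the shape is
// roughly:
//
//   for (int i = length - 1; i >= 0; i--) { /* convert element i */ }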
  // Conversion loop.
  __ bind(&loop);
  __ movp(rbx,
          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ Cvtlsi2sd(xmm0, rbx);
  __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), xmm0);
  __ jmp(&entry);
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ Push(rax);

  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ jmp(&initialization_loop_entry, Label::kNear);
  __ bind(&initialization_loop);
  __ movp(FieldOperand(r11, r9, times_pointer_size, FixedArray::kHeaderSize),
          rdi);
  __ bind(&initialization_loop_entry);
  __ decp(r9);
  __ j(not_sign, &initialization_loop);

  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(rax);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ jmp(fail);

  // Box doubles into heap numbers.
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_8,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movp(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  // Replace receiver's backing store with newly created and filled FixedArray.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(rax);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addp(index, result);
  __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, call_runtime);
  __ movp(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}
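// Control-flow sketch of the generator above (C-like pseudocode):
//
//   type = string->map()->instance_type();
//   if (indirect(type)) {               // cons or sliced
//     if (sliced(type)) { index += offset; string = parent; }
//     else {                            // cons: only flat cons handled here
//       if (string->second() != empty_string) goto call_runtime;
//       string = string->first();
//     }
//     type = string->map()->instance_type();  // re-dispatch on the target
//   }
//   if (external(type)) {               // short external -> call_runtime
//     result = load resource_data[index], two-byte or one-byte;
//   } else {                            // sequential
//     result = load in-object payload[index], two-byte or one-byte;
//   }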


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch));
  DCHECK(!result.is(double_scratch));
  DCHECK(!temp1.is(temp2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label done;

  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ Movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
  __ Xorpd(result, result);
  __ Ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ Ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
  __ Movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
  __ j(above_equal, &done);
  __ Movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
  __ Movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
  __ Mulsd(double_scratch, input);
  __ Addsd(double_scratch, result);
  __ Movq(temp2, double_scratch);
  __ Subsd(double_scratch, result);
  __ Movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
  __ leaq(temp1, Operand(temp2, 0x1ff800));
  __ andq(temp2, Immediate(0x7ff));
  __ shrq(temp1, Immediate(11));
  __ Mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
  __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
  __ shlq(temp1, Immediate(52));
  __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ Subsd(double_scratch, input);
  __ Movsd(input, double_scratch);
  __ Subsd(result, double_scratch);
  __ Mulsd(input, double_scratch);
  __ Mulsd(result, input);
  __ Movq(input, temp1);
  __ Mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
  __ Subsd(result, double_scratch);
  __ Addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
  __ Mulsd(result, input);

  __ bind(&done);
}
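// Method note: this appears to be the usual table-driven exp, i.e.
// exp(x) = 2^k * 2^(j/2^11) * P(r) with x decomposed as
// (k * 2^11 + j) * ln(2) / 2^11 + r. The constants block holds the range
// limits, scale/rounding constants, and polynomial coefficients, while
// math_exp_log_table() holds the 2^11 = 2048 precomputed values of
// 2^(j/2048); hence the "andq temp2, 0x7ff" table index and the
// "shrq 11" / "shlq 52" pair that builds the 2^k exponent bits of the
// result directly in temp1.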

#undef __


CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found both in
  // FUNCTION and OPTIMIZED_FUNCTION code:
  CodePatcher patcher(isolate, young_sequence_.start(),
                      young_sequence_.length());
  patcher.masm()->pushq(rbp);
  patcher.masm()->movp(rbp, rsp);
  patcher.masm()->Push(rsi);
  patcher.masm()->Push(rdi);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence, young_length);
    patcher.masm()->call(stub->instruction_start());
    patcher.masm()->Nop(
        kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
  }
}
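// Note: young and aged sequences must occupy the same number of bytes so the
// patch is a plain overwrite. Aging replaces the prologue with a short call
// to the age stub and Nop()-pads the remaining
// kNoCodeAgeSequenceLength - kShortCallInstructionLength bytes; rejuvenating
// copies the young prologue back and flushes the icache explicitly.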


Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  DCHECK(index >= 0);
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  int displacement_to_last_argument = base_reg_.is(rsp) ?
      kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_.is(no_reg)) {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kPointerSize.
    DCHECK(argument_count_immediate_ + receiver > 0);
    return Operand(base_reg_, displacement_to_last_argument +
        (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
  } else {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
    return Operand(base_reg_, argument_count_reg_, times_pointer_size,
        displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
  }
}
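// Worked example, assuming kPointerSize == kPCOnStackSize == 8 on x64: with
// base_reg_ == rsp, ARGUMENTS_CONTAIN_RECEIVER, an immediate argument count
// of 3, and no extra displacement, GetArgumentOperand(0) yields
//
//   Operand(rsp, 8 + (3 + 1 - 1 - 0) * 8) == Operand(rsp, 32)
//
// i.e. argument 0 was pushed first and so sits farthest from rsp, with the
// receiver one slot beyond it at rsp + 40 and the return address closest
// to rsp.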


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64