// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm/codegen-arm.h"

#if V8_TARGET_ARCH_ARM

#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
  return Simulator::current(isolate)
      ->CallFPReturnsDouble(fast_exp_arm_machine_code, x, 0);
}
#endif


UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  {
    DwVfpRegister input = d0;
    DwVfpRegister result = d1;
    DwVfpRegister double_scratch1 = d2;
    DwVfpRegister double_scratch2 = d3;
    Register temp1 = r4;
    Register temp2 = r5;
    Register temp3 = r6;

    if (masm.use_eabi_hardfloat()) {
      // Input value is in d0 anyway, nothing to do.
    } else {
      __ vmov(input, r0, r1);
    }
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    if (masm.use_eabi_hardfloat()) {
      __ vmov(d0, result);
    } else {
      __ vmov(r0, r1, result);
    }
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
  fast_exp_arm_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}

#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

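  // NEON fast path: prefetch ahead and copy 64 bytes per loop iteration,
  // then drain the remainder in 128/64/32/16/8-byte blocks, finishing with a
  // possibly overlapping 8-byte copy. Copies shorter than 8 bytes fall
  // through to the word/halfword/byte tail below.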
  if (CpuFeatures::IsSupported(NEON)) {
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
    __ pld(MemOperand(src, 0));

    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));

    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));

    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));

    __ Ret();

    __ bind(&size_less_than_8);

    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    Register temp2 = ip;
    Label loop;

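    // Fallback without NEON: copy a word at a time, then let the shared
    // less_4 tail below handle the remaining 0-3 bytes.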
    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // bit0 => Z (ne), bit1 => C (cs)
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}


// Convert 8 to 16. The number of characters to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    Isolate* isolate, MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  if (CpuFeatures::IsSupported(NEON)) {
    Register temp = r3;
    Label loop;

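    // Widen eight characters per iteration: vld1 loads 8 bytes, vmovl
    // zero-extends them to halfwords, and vst1 stores the 16 bytes. A final
    // 8-character copy, overlapping the previous one, covers the remainder.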
    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

    __ bind(&loop);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    Register temp1 = r3;
    Register temp2 = ip;
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

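    // Without NEON, widen four characters per iteration using uxtb16, pkhbt
    // and pkhtb, then handle the remaining 0-3 characters after the loop.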
    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, temp1);
    __ uxtb16(temp4, temp1, 8);
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, temp1, 8);
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, temp1);
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(&desc);

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif

UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r4;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r4;
  Register length = r5;
  Register array = r6;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
  __ mov(lr, Operand(length, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  // array: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // r4: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(scratch1, array, Operand(kHeapObjectTag));
  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_end, scratch2, Operand(length, LSL, 2));

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;

  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, scratch2, 0);
  __ add(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = r4;
  Register array = r6;
  Register length = r5;
  Register scratch = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(target_map, receiver, key, value);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_size, array_size, Operand(length, LSL, 1));
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ add(src_elements, elements,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(dst_end, dst_elements, Operand(length, LSL, 1));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ b(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
  __ bind(&initialization_loop_entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &initialization_loop);

  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(array, array, Operand(kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
  __ cmp(upper_bits, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
  __ Strd(scratch2, upper_bits,
          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
  __ mov(scratch2, dst_elements);
  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));

  __ bind(&entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &loop);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &one_byte);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


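// Generates code that computes exp(input) using the constants set up by
// ExternalReference::math_exp_constants(): inputs at or below constant 0
// produce zero and inputs at or above constant 1 produce infinity (loaded
// from constant 2); otherwise the result combines a polynomial correction
// with a lookup in the 2048-entry math_exp_log_table, whose entry gets the
// integer exponent or'ed into its high word before the final multiply.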
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DwVfpRegister input,
                                   DwVfpRegister result,
                                   DwVfpRegister double_scratch1,
                                   DwVfpRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(!temp1.is(temp2));
  DCHECK(!temp1.is(temp3));
  DCHECK(!temp2.is(temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label zero, infinity, done;

  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ vldr(double_scratch1, ExpConstant(0, temp3));
  __ VFPCompareAndSetFlags(double_scratch1, input);
  __ b(ge, &zero);

  __ vldr(double_scratch2, ExpConstant(1, temp3));
  __ VFPCompareAndSetFlags(input, double_scratch2);
  __ b(ge, &infinity);

  __ vldr(double_scratch1, ExpConstant(3, temp3));
  __ vldr(result, ExpConstant(4, temp3));
  __ vmul(double_scratch1, double_scratch1, input);
  __ vadd(double_scratch1, double_scratch1, result);
  __ VmovLow(temp2, double_scratch1);
  __ vsub(double_scratch1, double_scratch1, result);
  __ vldr(result, ExpConstant(6, temp3));
  __ vldr(double_scratch2, ExpConstant(5, temp3));
  __ vmul(double_scratch1, double_scratch1, double_scratch2);
  __ vsub(double_scratch1, double_scratch1, input);
  __ vsub(result, result, double_scratch1);
  __ vmul(double_scratch2, double_scratch1, double_scratch1);
  __ vmul(result, result, double_scratch2);
  __ vldr(double_scratch2, ExpConstant(7, temp3));
  __ vmul(result, result, double_scratch2);
  __ vsub(result, result, double_scratch1);
  // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
  DCHECK(*reinterpret_cast<double*>
         (ExternalReference::math_exp_constants(8).address()) == 1);
  __ vmov(double_scratch2, 1);
  __ vadd(result, result, double_scratch2);
  __ mov(temp1, Operand(temp2, LSR, 11));
  __ Ubfx(temp2, temp2, 0, 11);
  __ add(temp1, temp1, Operand(0x3ff));

  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ add(temp3, temp3, Operand(temp2, LSL, 3));
  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
  // The first word loaded goes into the lower-numbered register.
  if (temp2.code() < temp3.code()) {
    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp2, temp1);
  } else {
    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp3, temp1);
  }
  __ vmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&zero);
  __ vmov(result, kDoubleRegZero);
  __ b(&done);

  __ bind(&infinity);
  __ vldr(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#undef __

#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif

CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before ARM simulator ICache is setup.
  base::SmartPointer<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
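  // Emit the "young" code sequence: the standard frame-building prologue
  // (push fixed frame, marker nop, set up fp).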
  patcher->masm()->PushFixedFrame(r1);
  patcher->masm()->nop(ip.code());
  patcher->masm()->add(
      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
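    // Patch the start of the sequence to jump to the code age stub:
    //   add r0, pc, #-8      ; r0 <- start of the patched sequence
    //   ldr pc, [pc, #-4]    ; jump to the stub address emitted below
    //   <stub entry address>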
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->ldr(pc, MemOperand(pc, -4));
    patcher.masm()->emit_code_stub_address(stub);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM