// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm/codegen-arm.h"

#if V8_TARGET_ARCH_ARM

#include <memory>

#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


#define __ masm.

#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

  if (CpuFeatures::IsSupported(NEON)) {
    CpuFeatureScope scope(&masm, NEON);
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
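    // Large copies: prefetch the source ahead of the loads and move data in
    // 64-byte NEON chunks (vld1/vst1 over four d-registers), falling back to
    // progressively smaller chunks for the tail.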
    __ pld(MemOperand(src, 0));

    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));

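    // Main loop: copy 64 bytes per iteration while prefetching 256 bytes
    // ahead of the current source position.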
    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));

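    // Fewer than 256 bytes remain: copy 128 bytes, then fall through or
    // branch depending on what is left.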
    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

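    // Fewer than 32 bytes remain: copy a 16-byte chunk and an 8-byte chunk as
    // needed, then finish with the overlapping tail copy below.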
    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));

    __ Ret();

    __ bind(&size_less_than_8);

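    // Fewer than 8 bytes remain: copy one word if at least 4 remain; the last
    // 0 to 3 bytes are handled at less_4.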
    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    Register temp2 = ip;
    Label loop;

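    // Non-NEON path: copy one word per iteration until dest reaches the end
    // of the word-aligned region (temp2), then handle the 0 to 3 byte tail.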
    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // bit0 => Z (ne), bit1 => C (cs)
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}


// Convert 8-bit characters to 16-bit characters. The number of characters to
// copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    Isolate* isolate, MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  if (CpuFeatures::IsSupported(NEON)) {
    CpuFeatureScope scope(&masm, NEON);
    Register temp = r3;
    Label loop;

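    // Widen 8 characters per iteration: vld1 loads 8 bytes, vmovl
    // zero-extends them to 8 halfwords, and vst1 stores the resulting
    // 16 bytes. temp marks the end of the whole-block destination region.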
    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

    __ bind(&loop);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    Register temp1 = r3;
    Register temp2 = ip;
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

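    // Without NEON, widen four characters per iteration: uxtb16 extracts the
    // even and odd bytes into halfword lanes, and pkhbt/pkhtb re-pack them in
    // order before the two word stores.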
    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, temp1);
    __ uxtb16(temp4, temp1, 8);
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, temp1, 8);
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, temp1);
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(&desc);

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif

UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

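  // Take the square root of the incoming double argument with vsqrt and
  // return it via the floating-point result register.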
  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r4;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r4;
  Register length = r5;
  Register array = r6;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
  __ mov(lr, Operand(length, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  __ sub(array, array, Operand(kHeapObjectTag));
  // array: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // r4: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(scratch1, array, Operand(kHeapObjectTag));
  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_end, scratch2, Operand(length, LSL, 2));

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;

  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, scratch2, 0);
  __ add(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = r4;
  Register array = r6;
  Register length = r5;
  Register scratch = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(target_map, receiver, key, value);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_size, array_size, Operand(length, LSL, 1));
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));

  __ sub(array, array, Operand(kHeapObjectTag));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ add(src_elements, elements,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(dst_end, dst_elements, Operand(length, LSL, 1));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ b(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
  __ bind(&initialization_loop_entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &initialization_loop);

  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(array, array, Operand(kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
  __ cmp(upper_bits, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
  __ Strd(scratch2, upper_bits,
          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
  __ mov(scratch2, dst_elements);
  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));

  __ bind(&entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &loop);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &one_byte);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}

#undef __

#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif

CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before ARM simulator ICache is setup.
  std::unique_ptr<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(r1);
  patcher->masm()->nop(ip.code());
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
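    // Overwrite the young sequence with a jump to the code age stub:
    // r0 <- start of the patched sequence (pc reads as the add's address + 8),
    // then pc is loaded from the stub address literal emitted after the ldr.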
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->ldr(pc, MemOperand(pc, -4));
    patcher.masm()->emit_code_stub_address(stub);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM