// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ppc/codegen-ppc.h"

#if V8_TARGET_ARCH_PPC

#include <memory>

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/ppc/simulator-ppc.h"

namespace v8 {
namespace internal {


#define __ masm.

UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  // Called from C
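  // Emit a function descriptor (rather than a raw entry point) where the
  // native ABI requires one (e.g. AIX / ELFv1) so C callers can invoke the
  // buffer directly.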
  __ function_descriptor();

  __ MovFromFloatParameter(d1);
  __ fsqrt(d1, d1);
  __ MovToFloatResult(d1);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
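  // Make the buffer read+execute (no longer writable) before exposing it as
  // a callable function.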
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r7;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address
  Label loop, entry, convert_hole, only_change_map, done;
  Register elements = r7;
  Register length = r8;
  Register array = r9;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r10;
  Register scratch3 = r11;
  Register scratch4 = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(scratch3, length);
  __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
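  // Allocate() returns a tagged heap object pointer; strip the tag so the
  // stores below can use plain untagged offsets.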
  __ subi(array, array, Operand(kHeapObjectTag));
  // array: destination FixedDoubleArray, not tagged as heap object.
  // elements: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ addi(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ addi(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array_end, length);
  __ add(array_end, scratch2, array_end);
  // Repurpose registers no longer in use.
#if V8_TARGET_ARCH_PPC64
  Register hole_int64 = elements;
  __ mov(hole_int64, Operand(kHoleNanInt64));
#else
  Register hole_lower = elements;
  Register hole_upper = length;
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 OR hole_int64: kHoleNanInt64
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(scratch3, MemOperand(scratch1));
  __ addi(scratch1, scratch1, Operand(kPointerSize));
  // scratch3: current element
  __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(scratch3, d0);
  __ stfd(d0, MemOperand(scratch2, 0));
  __ addi(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
    __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_PPC64
  __ std(hole_int64, MemOperand(scratch2, 0));
#else
  __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
  __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
#endif
  __ addi(scratch2, scratch2, Operand(8));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ blt(&loop);

  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r7;
  Register array = r9;
  Register length = r8;
  Register scratch = r10;
  Register scratch3 = r11;
  Register hole_value = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

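  // Save the registers that are repurposed as scratch below; they are
  // restored via Pop on both the normal and the gc_required exits.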
  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ add(array_size, array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
            r0);
  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ addi(src_elements, elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
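  // length now holds the backing store size in bytes; r0 receives the
  // element count, and both loops below are skipped if it is zero.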
  __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
  __ beq(&loop_done, cr0);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  __ mtctr(r0);
  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
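  // dst_elements starts one slot before the first element because StorePU
  // pre-increments the address before each store.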
  __ bind(&initialization_loop);
  __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
  __ bdnz(&initialization_loop);

  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Use offset addresses in src_elements to take full advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // hole_value: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(hole_value, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ cmpl(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ addi(src_elements, src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bits
  // src_elements: address of next element's upper 32 bits
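  // The hole is encoded as a NaN whose upper 32 bits equal kHoleNanUpper32,
  // so comparing the exponent word alone is sufficient to detect it.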
  __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
  __ beq(&convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
  // subtract tag for std
  __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ lwz(scratch2,
         MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ lwz(upper_bits,
         MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ mr(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ cmpl(dst_elements, dst_end);
  __ blt(&loop);
  __ bind(&loop_done);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ andi(r0, result, Operand(kIsIndirectStringMask));
  __ beq(&check_sequential, cr0);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ mov(ip, Operand(kSlicedNotConsMask));
  __ and_(r0, result, ip, SetRC);
  __ beq(&cons_string, cr0);

  // Handle slices.
  Label indirect_string_loaded;
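  // Add the slice offset (a smi) to the index and continue the lookup on the
  // parent string.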
  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ SmiUntag(ip, result);
  __ add(index, index, ip);
  __ b(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ bne(call_runtime);
  // Get the first of the two strings and load its instance type.
  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ andi(r0, result, Operand(kStringRepresentationMask));
  __ bne(&external_string, cr0);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ addi(string, string,
          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ b(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ andi(r0, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ andi(r0, result, Operand(kShortExternalStringMask));
  __ bne(call_runtime, cr0);
  __ LoadP(string,
           FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
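  // A set encoding bit means one-byte; two-byte strings have the bit clear
  // (kTwoByteStringTag == 0).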
  __ andi(r0, result, Operand(kStringEncodingMask));
  __ bne(&one_byte, cr0);
  // Two-byte string.
  __ ShiftLeftImm(result, index, Operand(1));
  __ lhzx(result, MemOperand(string, result));
  __ b(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ lbzx(result, MemOperand(string, index));
  __ bind(&done);
}

#undef __

CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the simulator ICache is set up.
  std::unique_ptr<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
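  // Record the young code-age sequence: a standard frame push followed by
  // padding nops. Aged code replaces it with a jump to the code-age stub
  // (see PatchPlatformCodeAge below).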
  patcher->masm()->PushStandardFrame(r4);
  for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
    patcher->masm()->nop();
  }
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Code* code = NULL;
    Address target_address =
        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    // FIXED_SEQUENCE
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
    // Don't use Call -- we need to preserve ip and lr.
    // See GenerateMakeCodeYoungAgainCommon for the stub code.
    patcher.masm()->nop();  // marker to detect sequence (see IsOld)
    patcher.masm()->mov(r3, Operand(target));
    patcher.masm()->Jump(r3);
    for (int i = 0; i < kCodeAgingSequenceNops; i++) {
      patcher.masm()->nop();
    }
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC