1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <assert.h> // For assert
6 #include <limits.h> // For LONG_MIN, LONG_MAX.
7
8 #if V8_TARGET_ARCH_PPC
9
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/debug/debug.h"
15 #include "src/register-configuration.h"
16 #include "src/runtime/runtime.h"
17
18 #include "src/ppc/macro-assembler-ppc.h"
19
20 namespace v8 {
21 namespace internal {
22
23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
24 CodeObjectRequired create_code_object)
25 : Assembler(arg_isolate, buffer, size),
26 generating_stub_(false),
27 has_frame_(false) {
28 if (create_code_object == CodeObjectRequired::kYes) {
29 code_object_ =
30 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
31 }
32 }
33
34
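// Branches to the address in |target| by moving it into the count register
// and issuing bctr; the link register is left untouched.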
35 void MacroAssembler::Jump(Register target) {
36 mtctr(target);
37 bctr();
38 }
39
40
41 void MacroAssembler::JumpToJSEntry(Register target) {
42 Move(ip, target);
43 Jump(ip);
44 }
45
46
47 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
48 Condition cond, CRegister cr) {
49 Label skip;
50
51 if (cond != al) b(NegateCondition(cond), &skip, cr);
52
53 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
54
55 mov(ip, Operand(target, rmode));
56 mtctr(ip);
57 bctr();
58
59 bind(&skip);
60 }
61
62
63 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
64 CRegister cr) {
65 DCHECK(!RelocInfo::IsCodeTarget(rmode));
66 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
67 }
68
69
70 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
71 Condition cond) {
72 DCHECK(RelocInfo::IsCodeTarget(rmode));
73 // 'code' is always generated ppc code, never THUMB code
74 AllowDeferredHandleDereference embedding_raw_address;
75 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
76 }
77
78
79 int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }
80
81
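// Calls the code at |target| via the count register (mtctr/bctrl), setting
// the link register for the return; the two-instruction sequence matches
// CallSize(Register) above.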
82 void MacroAssembler::Call(Register target) {
83 BlockTrampolinePoolScope block_trampoline_pool(this);
84 Label start;
85 bind(&start);
86
87 // Statement positions are expected to be recorded when the target
88 // address is loaded.
89 positions_recorder()->WriteRecordedPositions();
90
91 // branch via link register and set LK bit for return point
92 mtctr(target);
93 bctrl();
94
95 DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
96 }
97
98
99 void MacroAssembler::CallJSEntry(Register target) {
100 DCHECK(target.is(ip));
101 Call(target);
102 }
103
104
105 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
106 Condition cond) {
107 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
108 return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
109 }
110
111
112 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
113 RelocInfo::Mode rmode,
114 Condition cond) {
115 return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
116 }
117
118
119 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
120 Condition cond) {
121 BlockTrampolinePoolScope block_trampoline_pool(this);
122 DCHECK(cond == al);
123
124 #ifdef DEBUG
125 // Check the expected size before generating code to ensure we assume the same
126 // constant pool availability (e.g., whether constant pool is full or not).
127 int expected_size = CallSize(target, rmode, cond);
128 Label start;
129 bind(&start);
130 #endif
131
132 // Statement positions are expected to be recorded when the target
133 // address is loaded.
134 positions_recorder()->WriteRecordedPositions();
135
136 // This can likely be optimized to make use of bc() with a 24-bit relative offset:
137 //
138 // RecordRelocInfo(x.rmode_, x.imm_);
139 // bc( BA, .... offset, LKset);
140 //
141
142 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
143 mtctr(ip);
144 bctrl();
145
146 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
147 }
148
149
150 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
151 TypeFeedbackId ast_id, Condition cond) {
152 AllowDeferredHandleDereference using_raw_address;
153 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
154 }
155
156
157 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
158 TypeFeedbackId ast_id, Condition cond) {
159 BlockTrampolinePoolScope block_trampoline_pool(this);
160 DCHECK(RelocInfo::IsCodeTarget(rmode));
161
162 #ifdef DEBUG
163 // Check the expected size before generating code to ensure we assume the same
164 // constant pool availability (e.g., whether constant pool is full or not).
165 int expected_size = CallSize(code, rmode, ast_id, cond);
166 Label start;
167 bind(&start);
168 #endif
169
170 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
171 SetRecordedAstId(ast_id);
172 rmode = RelocInfo::CODE_TARGET_WITH_ID;
173 }
174 AllowDeferredHandleDereference using_raw_address;
175 Call(reinterpret_cast<Address>(code.location()), rmode, cond);
176 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
177 }
178
179
180 void MacroAssembler::Drop(int count) {
181 if (count > 0) {
182 Add(sp, sp, count * kPointerSize, r0);
183 }
184 }
185
186
187 void MacroAssembler::Call(Label* target) { b(target, SetLK); }
188
189
190 void MacroAssembler::Push(Handle<Object> handle) {
191 mov(r0, Operand(handle));
192 push(r0);
193 }
194
195
196 void MacroAssembler::Move(Register dst, Handle<Object> value) {
197 AllowDeferredHandleDereference smi_check;
198 if (value->IsSmi()) {
199 LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
200 } else {
201 DCHECK(value->IsHeapObject());
202 if (isolate()->heap()->InNewSpace(*value)) {
203 Handle<Cell> cell = isolate()->factory()->NewCell(value);
204 mov(dst, Operand(cell));
205 LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
206 } else {
207 mov(dst, Operand(value));
208 }
209 }
210 }
211
212
213 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
214 DCHECK(cond == al);
215 if (!dst.is(src)) {
216 mr(dst, src);
217 }
218 }
219
220
221 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
222 if (!dst.is(src)) {
223 fmr(dst, src);
224 }
225 }
226
227
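// Stores the registers selected by |regs| into consecutive slots below
// |location|, highest-numbered register at the highest address, after
// decrementing |location| by the total size.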
228 void MacroAssembler::MultiPush(RegList regs, Register location) {
229 int16_t num_to_push = NumberOfBitsSet(regs);
230 int16_t stack_offset = num_to_push * kPointerSize;
231
232 subi(location, location, Operand(stack_offset));
233 for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
234 if ((regs & (1 << i)) != 0) {
235 stack_offset -= kPointerSize;
236 StoreP(ToRegister(i), MemOperand(location, stack_offset));
237 }
238 }
239 }
240
241
242 void MacroAssembler::MultiPop(RegList regs, Register location) {
243 int16_t stack_offset = 0;
244
245 for (int16_t i = 0; i < Register::kNumRegisters; i++) {
246 if ((regs & (1 << i)) != 0) {
247 LoadP(ToRegister(i), MemOperand(location, stack_offset));
248 stack_offset += kPointerSize;
249 }
250 }
251 addi(location, location, Operand(stack_offset));
252 }
253
254
255 void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
256 int16_t num_to_push = NumberOfBitsSet(dregs);
257 int16_t stack_offset = num_to_push * kDoubleSize;
258
259 subi(location, location, Operand(stack_offset));
260 for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
261 if ((dregs & (1 << i)) != 0) {
262 DoubleRegister dreg = DoubleRegister::from_code(i);
263 stack_offset -= kDoubleSize;
264 stfd(dreg, MemOperand(location, stack_offset));
265 }
266 }
267 }
268
269
270 void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
271 int16_t stack_offset = 0;
272
273 for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
274 if ((dregs & (1 << i)) != 0) {
275 DoubleRegister dreg = DoubleRegister::from_code(i);
276 lfd(dreg, MemOperand(location, stack_offset));
277 stack_offset += kDoubleSize;
278 }
279 }
280 addi(location, location, Operand(stack_offset));
281 }
282
283
284 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
285 Condition cond) {
286 DCHECK(cond == al);
287 LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
288 }
289
290
291 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
292 Condition cond) {
293 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
294 DCHECK(cond == al);
295 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
296 }
297
298
299 void MacroAssembler::InNewSpace(Register object, Register scratch,
300 Condition cond, Label* branch) {
301 // N.B. scratch may be same register as object
302 DCHECK(cond == eq || cond == ne);
303 mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
304 and_(scratch, object, r0);
305 mov(r0, Operand(ExternalReference::new_space_start(isolate())));
306 cmp(scratch, r0);
307 b(cond, branch);
308 }
309
310
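// Write-barrier helper for a store of |value| into the field at |offset|
// within |object|: computes the field address into |dst| and delegates to
// RecordWrite, optionally skipping the barrier for smi values.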
311 void MacroAssembler::RecordWriteField(
312 Register object, int offset, Register value, Register dst,
313 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
314 RememberedSetAction remembered_set_action, SmiCheck smi_check,
315 PointersToHereCheck pointers_to_here_check_for_value) {
316 // First, check if a write barrier is even needed. The tests below
317 // catch stores of Smis.
318 Label done;
319
320 // Skip barrier if writing a smi.
321 if (smi_check == INLINE_SMI_CHECK) {
322 JumpIfSmi(value, &done);
323 }
324
325 // Although the object register is tagged, the offset is relative to the start
326 // of the object, so the offset must be a multiple of kPointerSize.
327 DCHECK(IsAligned(offset, kPointerSize));
328
329 Add(dst, object, offset - kHeapObjectTag, r0);
330 if (emit_debug_code()) {
331 Label ok;
332 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
333 beq(&ok, cr0);
334 stop("Unaligned cell in write barrier");
335 bind(&ok);
336 }
337
338 RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
339 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
340
341 bind(&done);
342
343 // Clobber clobbered input registers when running with the debug-code flag
344 // turned on to provoke errors.
345 if (emit_debug_code()) {
346 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
347 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
348 }
349 }
350
351
352 // Will clobber 4 registers: object, map, dst, ip. The
353 // register 'object' contains a heap object pointer.
354 void MacroAssembler::RecordWriteForMap(Register object, Register map,
355 Register dst,
356 LinkRegisterStatus lr_status,
357 SaveFPRegsMode fp_mode) {
358 if (emit_debug_code()) {
359 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
360 Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
361 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
362 }
363
364 if (!FLAG_incremental_marking) {
365 return;
366 }
367
368 if (emit_debug_code()) {
369 LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
370 cmp(ip, map);
371 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
372 }
373
374 Label done;
375
376 // A single check of the map's page's interesting flag suffices, since it is
377 // only set during incremental collection, and then it's also guaranteed that
378 // the from object's page's interesting flag is also set. This optimization
379 // relies on the fact that maps can never be in new space.
380 CheckPageFlag(map,
381 map, // Used as scratch.
382 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
383
384 addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
385 if (emit_debug_code()) {
386 Label ok;
387 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
388 beq(&ok, cr0);
389 stop("Unaligned cell in write barrier");
390 bind(&ok);
391 }
392
393 // Record the actual write.
394 if (lr_status == kLRHasNotBeenSaved) {
395 mflr(r0);
396 push(r0);
397 }
398 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
399 fp_mode);
400 CallStub(&stub);
401 if (lr_status == kLRHasNotBeenSaved) {
402 pop(r0);
403 mtlr(r0);
404 }
405
406 bind(&done);
407
408 // Count number of write barriers in generated code.
409 isolate()->counters()->write_barriers_static()->Increment();
410 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
411
412 // Clobber clobbered registers when running with the debug-code flag
413 // turned on to provoke errors.
414 if (emit_debug_code()) {
415 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
416 mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
417 }
418 }
419
420
421 // Will clobber 4 registers: object, address, scratch, ip. The
422 // register 'object' contains a heap object pointer. The heap object
423 // tag is shifted away.
424 void MacroAssembler::RecordWrite(
425 Register object, Register address, Register value,
426 LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
427 RememberedSetAction remembered_set_action, SmiCheck smi_check,
428 PointersToHereCheck pointers_to_here_check_for_value) {
429 DCHECK(!object.is(value));
430 if (emit_debug_code()) {
431 LoadP(r0, MemOperand(address));
432 cmp(r0, value);
433 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
434 }
435
436 if (remembered_set_action == OMIT_REMEMBERED_SET &&
437 !FLAG_incremental_marking) {
438 return;
439 }
440
441 // First, check if a write barrier is even needed. The tests below
442 // catch stores of smis and stores into the young generation.
443 Label done;
444
445 if (smi_check == INLINE_SMI_CHECK) {
446 JumpIfSmi(value, &done);
447 }
448
449 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
450 CheckPageFlag(value,
451 value, // Used as scratch.
452 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
453 }
454 CheckPageFlag(object,
455 value, // Used as scratch.
456 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
457
458 // Record the actual write.
459 if (lr_status == kLRHasNotBeenSaved) {
460 mflr(r0);
461 push(r0);
462 }
463 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
464 fp_mode);
465 CallStub(&stub);
466 if (lr_status == kLRHasNotBeenSaved) {
467 pop(r0);
468 mtlr(r0);
469 }
470
471 bind(&done);
472
473 // Count number of write barriers in generated code.
474 isolate()->counters()->write_barriers_static()->Increment();
475 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
476 value);
477
478 // Clobber clobbered registers when running with the debug-code flag
479 // turned on to provoke errors.
480 if (emit_debug_code()) {
481 mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
482 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
483 }
484 }
485
486
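// Records |address| in the store buffer: appends it at the current store
// buffer top and calls the StoreBufferOverflowStub when the overflow bit
// of the new top is set.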
487 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
488 Register address, Register scratch,
489 SaveFPRegsMode fp_mode,
490 RememberedSetFinalAction and_then) {
491 Label done;
492 if (emit_debug_code()) {
493 Label ok;
494 JumpIfNotInNewSpace(object, scratch, &ok);
495 stop("Remembered set pointer is in new space");
496 bind(&ok);
497 }
498 // Load store buffer top.
499 ExternalReference store_buffer =
500 ExternalReference::store_buffer_top(isolate());
501 mov(ip, Operand(store_buffer));
502 LoadP(scratch, MemOperand(ip));
503 // Store pointer to buffer and increment buffer top.
504 StoreP(address, MemOperand(scratch));
505 addi(scratch, scratch, Operand(kPointerSize));
506 // Write back new top of buffer.
507 StoreP(scratch, MemOperand(ip));
508 // Call stub on end of buffer.
509 // Check for end of buffer.
510 mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit));
511 and_(r0, scratch, r0, SetRC);
512
513 if (and_then == kFallThroughAtEnd) {
514 beq(&done, cr0);
515 } else {
516 DCHECK(and_then == kReturnAtEnd);
517 Ret(eq, cr0);
518 }
519 mflr(r0);
520 push(r0);
521 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
522 CallStub(&store_buffer_overflow);
523 pop(r0);
524 mtlr(r0);
525 bind(&done);
526 if (and_then == kReturnAtEnd) {
527 Ret();
528 }
529 }
530
531
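// Pushes the fixed frame slots: lr, fp, the constant pool pointer (when
// embedded constant pools are enabled), cp, and an optional frame marker.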
532 void MacroAssembler::PushFixedFrame(Register marker_reg) {
533 mflr(r0);
534 if (FLAG_enable_embedded_constant_pool) {
535 if (marker_reg.is_valid()) {
536 Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
537 } else {
538 Push(r0, fp, kConstantPoolRegister, cp);
539 }
540 } else {
541 if (marker_reg.is_valid()) {
542 Push(r0, fp, cp, marker_reg);
543 } else {
544 Push(r0, fp, cp);
545 }
546 }
547 }
548
549
550 void MacroAssembler::PopFixedFrame(Register marker_reg) {
551 if (FLAG_enable_embedded_constant_pool) {
552 if (marker_reg.is_valid()) {
553 Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
554 } else {
555 Pop(r0, fp, kConstantPoolRegister, cp);
556 }
557 } else {
558 if (marker_reg.is_valid()) {
559 Pop(r0, fp, cp, marker_reg);
560 } else {
561 Pop(r0, fp, cp);
562 }
563 }
564 mtlr(r0);
565 }
566
567
568 const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
569 const int MacroAssembler::kNumSafepointSavedRegisters =
570 Register::kNumAllocatable;
571
572 // Push and pop all registers that can hold pointers.
573 void MacroAssembler::PushSafepointRegisters() {
574 // Safepoints expect a block of kNumSafepointRegisters values on the
575 // stack, so adjust the stack for unsaved registers.
576 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
577 DCHECK(num_unsaved >= 0);
578 if (num_unsaved > 0) {
579 subi(sp, sp, Operand(num_unsaved * kPointerSize));
580 }
581 MultiPush(kSafepointSavedRegisters);
582 }
583
584
585 void MacroAssembler::PopSafepointRegisters() {
586 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
587 MultiPop(kSafepointSavedRegisters);
588 if (num_unsaved > 0) {
589 addi(sp, sp, Operand(num_unsaved * kPointerSize));
590 }
591 }
592
593
594 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
595 StoreP(src, SafepointRegisterSlot(dst));
596 }
597
598
599 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
600 LoadP(dst, SafepointRegisterSlot(src));
601 }
602
603
604 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
605 // The registers are pushed starting with the highest encoding,
606 // which means that the lowest encodings are closest to the stack pointer.
607 RegList regs = kSafepointSavedRegisters;
608 int index = 0;
609
610 DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
611
612 for (int16_t i = 0; i < reg_code; i++) {
613 if ((regs & (1 << i)) != 0) {
614 index++;
615 }
616 }
617
618 return index;
619 }
620
621
622 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
623 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
624 }
625
626
627 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
628 // General purpose registers are pushed last on the stack.
629 const RegisterConfiguration* config =
630 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
631 int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
632 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
633 return MemOperand(sp, doubles_size + register_offset);
634 }
635
636
637 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
638 const DoubleRegister src) {
639 // Turn potential sNaN into qNaN.
640 fsub(dst, src, kDoubleRegZero);
641 }
642
643
644 void MacroAssembler::ConvertIntToDouble(Register src,
645 DoubleRegister double_dst) {
646 MovIntToDouble(double_dst, src, r0);
647 fcfid(double_dst, double_dst);
648 }
649
650
651 void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
652 DoubleRegister double_dst) {
653 MovUnsignedIntToDouble(double_dst, src, r0);
654 fcfid(double_dst, double_dst);
655 }
656
657
658 void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
659 const Register src,
660 const Register int_scratch) {
661 MovIntToDouble(dst, src, int_scratch);
662 fcfids(dst, dst);
663 }
664
665
666 #if V8_TARGET_ARCH_PPC64
667 void MacroAssembler::ConvertInt64ToDouble(Register src,
668 DoubleRegister double_dst) {
669 MovInt64ToDouble(double_dst, src);
670 fcfid(double_dst, double_dst);
671 }
672
673
674 void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
675 DoubleRegister double_dst) {
676 MovInt64ToDouble(double_dst, src);
677 fcfidus(double_dst, double_dst);
678 }
679
680
681 void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
682 DoubleRegister double_dst) {
683 MovInt64ToDouble(double_dst, src);
684 fcfidu(double_dst, double_dst);
685 }
686
687
688 void MacroAssembler::ConvertInt64ToFloat(Register src,
689 DoubleRegister double_dst) {
690 MovInt64ToDouble(double_dst, src);
691 fcfids(double_dst, double_dst);
692 }
693 #endif
694
695
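// Converts |double_input| to a 64-bit integer (in |dst|, plus |dst_hi| on
// 32-bit targets), using fctidz for round-to-zero and otherwise switching
// the FPU rounding mode temporarily around fctid.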
696 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
697 #if !V8_TARGET_ARCH_PPC64
698 const Register dst_hi,
699 #endif
700 const Register dst,
701 const DoubleRegister double_dst,
702 FPRoundingMode rounding_mode) {
703 if (rounding_mode == kRoundToZero) {
704 fctidz(double_dst, double_input);
705 } else {
706 SetRoundingMode(rounding_mode);
707 fctid(double_dst, double_input);
708 ResetRoundingMode();
709 }
710
711 MovDoubleToInt64(
712 #if !V8_TARGET_ARCH_PPC64
713 dst_hi,
714 #endif
715 dst, double_dst);
716 }
717
718 #if V8_TARGET_ARCH_PPC64
719 void MacroAssembler::ConvertDoubleToUnsignedInt64(
720 const DoubleRegister double_input, const Register dst,
721 const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
722 if (rounding_mode == kRoundToZero) {
723 fctiduz(double_dst, double_input);
724 } else {
725 SetRoundingMode(rounding_mode);
726 fctidu(double_dst, double_input);
727 ResetRoundingMode();
728 }
729
730 MovDoubleToInt64(dst, double_dst);
731 }
732 #endif
733
734
735 void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
736 Register code_target_address) {
737 lwz(kConstantPoolRegister,
738 MemOperand(code_target_address,
739 Code::kConstantPoolOffset - Code::kHeaderSize));
740 add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
741 }
742
743
744 void MacroAssembler::LoadConstantPoolPointerRegister(Register base,
745 int code_start_delta) {
746 add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(),
747 code_start_delta);
748 }
749
750
751 void MacroAssembler::LoadConstantPoolPointerRegister() {
752 mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
753 }
754
755
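// Builds a stub frame: pushes a StackFrame::STUB marker, points fp at the
// saved fp slot and, when embedded constant pools are enabled, establishes
// kConstantPoolRegister.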
756 void MacroAssembler::StubPrologue(Register base, int prologue_offset) {
757 LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
758 PushFixedFrame(r11);
759 // Adjust FP to point to saved FP.
760 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
761 if (FLAG_enable_embedded_constant_pool) {
762 if (!base.is(no_reg)) {
763 // base contains prologue address
764 LoadConstantPoolPointerRegister(base, -prologue_offset);
765 } else {
766 LoadConstantPoolPointerRegister();
767 }
768 set_constant_pool_available(true);
769 }
770 }
771
772
773 void MacroAssembler::Prologue(bool code_pre_aging, Register base,
774 int prologue_offset) {
775 DCHECK(!base.is(no_reg));
776 {
777 PredictableCodeSizeScope predictible_code_size_scope(
778 this, kNoCodeAgeSequenceLength);
779 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
780 // The following instructions must remain together and unmodified
781 // for code aging to work properly.
782 if (code_pre_aging) {
783 // Pre-age the code.
784 // This matches the code found in PatchPlatformCodeAge()
785 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
786 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
787 // Don't use Call -- we need to preserve ip and lr
788 nop(); // marker to detect sequence (see IsOld)
789 mov(r3, Operand(target));
790 Jump(r3);
791 for (int i = 0; i < kCodeAgingSequenceNops; i++) {
792 nop();
793 }
794 } else {
795 // This matches the code found in GetNoCodeAgeSequence()
796 PushFixedFrame(r4);
797 // Adjust fp to point to saved fp.
798 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
799 for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
800 nop();
801 }
802 }
803 }
804 if (FLAG_enable_embedded_constant_pool) {
805 // base contains prologue address
806 LoadConstantPoolPointerRegister(base, -prologue_offset);
807 set_constant_pool_available(true);
808 }
809 }
810
811
812 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
813 LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
814 LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
815 LoadP(vector,
816 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
817 }
818
819
820 void MacroAssembler::EnterFrame(StackFrame::Type type,
821 bool load_constant_pool_pointer_reg) {
822 if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
823 PushFixedFrame();
824 // This path should not rely on ip containing code entry.
825 LoadConstantPoolPointerRegister();
826 LoadSmiLiteral(ip, Smi::FromInt(type));
827 push(ip);
828 } else {
829 LoadSmiLiteral(ip, Smi::FromInt(type));
830 PushFixedFrame(ip);
831 }
832 // Adjust FP to point to saved FP.
833 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
834
835 mov(r0, Operand(CodeObject()));
836 push(r0);
837 }
838
839
840 int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
841 ConstantPoolUnavailableScope constant_pool_unavailable(this);
842 // r3: preserved
843 // r4: preserved
844 // r5: preserved
845
846 // Drop the execution stack down to the frame pointer and restore
847 // the caller's state.
848 int frame_ends;
849 LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
850 LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
851 if (FLAG_enable_embedded_constant_pool) {
852 const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
853 const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
854 const int offset =
855 ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
856 LoadP(kConstantPoolRegister, MemOperand(fp, offset));
857 }
858 mtlr(r0);
859 frame_ends = pc_offset();
860 Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
861 mr(fp, ip);
862 return frame_ends;
863 }
864
865
866 // ExitFrame layout (probably wrongish.. needs updating)
867 //
868 // SP -> previousSP
869 // LK reserved
870 // code
871 // sp_on_exit (for debug?)
872 // oldSP->prev SP
873 // LK
874 // <parameters on stack>
875
876 // Prior to calling EnterExitFrame, we've got a bunch of parameters
877 // on the stack that we need to wrap a real frame around, so first
878 // we reserve a slot for LK and push the previous SP, which is captured
879 // in the fp register (r31).
880 // Then we allocate a new frame.
881
882 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
883 // Set up the frame structure on the stack.
884 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
885 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
886 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
887 DCHECK(stack_space > 0);
888
889 // This is an opportunity to build a frame to wrap
890 // all of the pushes that have happened inside of V8
891 // since we were called from C code
892
893 // replicate ARM frame - TODO make this more closely follow PPC ABI
894 mflr(r0);
895 Push(r0, fp);
896 mr(fp, sp);
897 // Reserve room for saved entry sp and code object.
898 subi(sp, sp, Operand(ExitFrameConstants::kFrameSize));
899
900 if (emit_debug_code()) {
901 li(r8, Operand::Zero());
902 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
903 }
904 if (FLAG_enable_embedded_constant_pool) {
905 StoreP(kConstantPoolRegister,
906 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
907 }
908 mov(r8, Operand(CodeObject()));
909 StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
910
911 // Save the frame pointer and the context in top.
912 mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
913 StoreP(fp, MemOperand(r8));
914 mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
915 StoreP(cp, MemOperand(r8));
916
917 // Optionally save all volatile double registers.
918 if (save_doubles) {
919 MultiPushDoubles(kCallerSavedDoubles);
920 // Note that d0 will be accessible at
921 // fp - ExitFrameConstants::kFrameSize -
922 // kNumCallerSavedDoubles * kDoubleSize,
923 // since the sp slot and code slot were pushed after the fp.
924 }
925
926 addi(sp, sp, Operand(-stack_space * kPointerSize));
927
928 // Allocate and align the frame preparing for calling the runtime
929 // function.
930 const int frame_alignment = ActivationFrameAlignment();
931 if (frame_alignment > kPointerSize) {
932 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
933 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
934 }
935 li(r0, Operand::Zero());
936 StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
937
938 // Set the exit frame sp value to point just before the return address
939 // location.
940 addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
941 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
942 }
943
944
945 void MacroAssembler::InitializeNewString(Register string, Register length,
946 Heap::RootListIndex map_index,
947 Register scratch1, Register scratch2) {
948 SmiTag(scratch1, length);
949 LoadRoot(scratch2, map_index);
950 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
951 li(scratch1, Operand(String::kEmptyHashField));
952 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
953 StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
954 }
955
956
957 int MacroAssembler::ActivationFrameAlignment() {
958 #if !defined(USE_SIMULATOR)
959 // Running on the real platform. Use the alignment as mandated by the local
960 // environment.
961 // Note: This will break if we ever start generating snapshots on one PPC
962 // platform for another PPC platform with a different alignment.
963 return base::OS::ActivationFrameAlignment();
964 #else // Simulated
965 // If we are using the simulator then we should always align to the expected
966 // alignment. As the simulator is used to generate snapshots we do not know
967 // if the target platform will need alignment, so this is controlled from a
968 // flag.
969 return FLAG_sim_stack_alignment;
970 #endif
971 }
972
973
974 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
975 bool restore_context,
976 bool argument_count_is_length) {
977 ConstantPoolUnavailableScope constant_pool_unavailable(this);
978 // Optionally restore all double registers.
979 if (save_doubles) {
980 // Calculate the stack location of the saved doubles and restore them.
981 const int kNumRegs = kNumCallerSavedDoubles;
982 const int offset =
983 (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
984 addi(r6, fp, Operand(-offset));
985 MultiPopDoubles(kCallerSavedDoubles, r6);
986 }
987
988 // Clear top frame.
989 li(r6, Operand::Zero());
990 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
991 StoreP(r6, MemOperand(ip));
992
993 // Restore current context from top and clear it in debug mode.
994 if (restore_context) {
995 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
996 LoadP(cp, MemOperand(ip));
997 }
998 #ifdef DEBUG
999 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1000 StoreP(r6, MemOperand(ip));
1001 #endif
1002
1003 // Tear down the exit frame, pop the arguments, and return.
1004 LeaveFrame(StackFrame::EXIT);
1005
1006 if (argument_count.is_valid()) {
1007 if (!argument_count_is_length) {
1008 ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
1009 }
1010 add(sp, sp, argument_count);
1011 }
1012 }
1013
1014
1015 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
1016 Move(dst, d1);
1017 }
1018
1019
1020 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
1021 Move(dst, d1);
1022 }
1023
1024
1025 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1026 const ParameterCount& actual, Label* done,
1027 bool* definitely_mismatches,
1028 InvokeFlag flag,
1029 const CallWrapper& call_wrapper) {
1030 bool definitely_matches = false;
1031 *definitely_mismatches = false;
1032 Label regular_invoke;
1033
1034 // Check whether the expected and actual arguments count match. If not,
1035 // setup registers according to contract with ArgumentsAdaptorTrampoline:
1036 // r3: actual arguments count
1037 // r4: function (passed through to callee)
1038 // r5: expected arguments count
1039
1040 // The code below is made a lot easier because the calling code already sets
1041 // up actual and expected registers according to the contract if values are
1042 // passed in registers.
1043
1044 // ARM has some sanity checks as per below; consider adding them for PPC:
1045 // DCHECK(actual.is_immediate() || actual.reg().is(r3));
1046 // DCHECK(expected.is_immediate() || expected.reg().is(r5));
1047
1048 if (expected.is_immediate()) {
1049 DCHECK(actual.is_immediate());
1050 mov(r3, Operand(actual.immediate()));
1051 if (expected.immediate() == actual.immediate()) {
1052 definitely_matches = true;
1053 } else {
1054 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1055 if (expected.immediate() == sentinel) {
1056 // Don't worry about adapting arguments for builtins that
1057 // don't want that done. Skip adaptation code by making it look
1058 // like we have a match between expected and actual number of
1059 // arguments.
1060 definitely_matches = true;
1061 } else {
1062 *definitely_mismatches = true;
1063 mov(r5, Operand(expected.immediate()));
1064 }
1065 }
1066 } else {
1067 if (actual.is_immediate()) {
1068 mov(r3, Operand(actual.immediate()));
1069 cmpi(expected.reg(), Operand(actual.immediate()));
1070 beq(&regular_invoke);
1071 } else {
1072 cmp(expected.reg(), actual.reg());
1073 beq(&regular_invoke);
1074 }
1075 }
1076
1077 if (!definitely_matches) {
1078 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
1079 if (flag == CALL_FUNCTION) {
1080 call_wrapper.BeforeCall(CallSize(adaptor));
1081 Call(adaptor);
1082 call_wrapper.AfterCall();
1083 if (!*definitely_mismatches) {
1084 b(done);
1085 }
1086 } else {
1087 Jump(adaptor, RelocInfo::CODE_TARGET);
1088 }
1089 bind(&regular_invoke);
1090 }
1091 }
1092
1093
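// If the debugger's step-in flag is set, saves the live argument-count and
// target registers, calls Runtime::kDebugPrepareStepInIfStepping for |fun|,
// and restores the registers afterwards.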
1094 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
1095 const ParameterCount& expected,
1096 const ParameterCount& actual) {
1097 Label skip_flooding;
1098 ExternalReference step_in_enabled =
1099 ExternalReference::debug_step_in_enabled_address(isolate());
1100 mov(r7, Operand(step_in_enabled));
1101 lbz(r7, MemOperand(r7));
1102 cmpi(r7, Operand::Zero());
1103 beq(&skip_flooding);
1104 {
1105 FrameScope frame(this,
1106 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1107 if (expected.is_reg()) {
1108 SmiTag(expected.reg());
1109 Push(expected.reg());
1110 }
1111 if (actual.is_reg()) {
1112 SmiTag(actual.reg());
1113 Push(actual.reg());
1114 }
1115 if (new_target.is_valid()) {
1116 Push(new_target);
1117 }
1118 Push(fun, fun);
1119 CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
1120 Pop(fun);
1121 if (new_target.is_valid()) {
1122 Pop(new_target);
1123 }
1124 if (actual.is_reg()) {
1125 Pop(actual.reg());
1126 SmiUntag(actual.reg());
1127 }
1128 if (expected.is_reg()) {
1129 Pop(expected.reg());
1130 SmiUntag(expected.reg());
1131 }
1132 }
1133 bind(&skip_flooding);
1134 }
1135
1136
1137 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1138 const ParameterCount& expected,
1139 const ParameterCount& actual,
1140 InvokeFlag flag,
1141 const CallWrapper& call_wrapper) {
1142 // You can't call a function without a valid frame.
1143 DCHECK(flag == JUMP_FUNCTION || has_frame());
1144 DCHECK(function.is(r4));
1145 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
1146
1147 if (call_wrapper.NeedsDebugStepCheck()) {
1148 FloodFunctionIfStepping(function, new_target, expected, actual);
1149 }
1150
1151 // Clear the new.target register if not given.
1152 if (!new_target.is_valid()) {
1153 LoadRoot(r6, Heap::kUndefinedValueRootIndex);
1154 }
1155
1156 Label done;
1157 bool definitely_mismatches = false;
1158 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1159 call_wrapper);
1160 if (!definitely_mismatches) {
1161 // We call indirectly through the code field in the function to
1162 // allow recompilation to take effect without changing any of the
1163 // call sites.
1164 Register code = ip;
1165 LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1166 if (flag == CALL_FUNCTION) {
1167 call_wrapper.BeforeCall(CallSize(code));
1168 CallJSEntry(code);
1169 call_wrapper.AfterCall();
1170 } else {
1171 DCHECK(flag == JUMP_FUNCTION);
1172 JumpToJSEntry(code);
1173 }
1174
1175 // Continue here if InvokePrologue does handle the invocation due to
1176 // mismatched parameter counts.
1177 bind(&done);
1178 }
1179 }
1180
1181
1182 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1183 const ParameterCount& actual,
1184 InvokeFlag flag,
1185 const CallWrapper& call_wrapper) {
1186 // You can't call a function without a valid frame.
1187 DCHECK(flag == JUMP_FUNCTION || has_frame());
1188
1189 // Contract with called JS functions requires that function is passed in r4.
1190 DCHECK(fun.is(r4));
1191
1192 Register expected_reg = r5;
1193 Register temp_reg = r7;
1194
1195 LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1196 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1197 LoadWordArith(expected_reg,
1198 FieldMemOperand(
1199 temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1200 #if !defined(V8_TARGET_ARCH_PPC64)
1201 SmiUntag(expected_reg);
1202 #endif
1203
1204 ParameterCount expected(expected_reg);
1205 InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
1206 }
1207
1208
1209 void MacroAssembler::InvokeFunction(Register function,
1210 const ParameterCount& expected,
1211 const ParameterCount& actual,
1212 InvokeFlag flag,
1213 const CallWrapper& call_wrapper) {
1214 // You can't call a function without a valid frame.
1215 DCHECK(flag == JUMP_FUNCTION || has_frame());
1216
1217 // Contract with called JS functions requires that function is passed in r4.
1218 DCHECK(function.is(r4));
1219
1220 // Get the function and setup the context.
1221 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1222
1223 InvokeFunctionCode(r4, no_reg, expected, actual, flag, call_wrapper);
1224 }
1225
1226
1227 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1228 const ParameterCount& expected,
1229 const ParameterCount& actual,
1230 InvokeFlag flag,
1231 const CallWrapper& call_wrapper) {
1232 Move(r4, function);
1233 InvokeFunction(r4, expected, actual, flag, call_wrapper);
1234 }
1235
1236
1237 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
1238 Label* fail) {
1239 DCHECK(kNotStringTag != 0);
1240
1241 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1242 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1243 andi(r0, scratch, Operand(kIsNotStringMask));
1244 bne(fail, cr0);
1245 }
1246
1247
1248 void MacroAssembler::IsObjectNameType(Register object, Register scratch,
1249 Label* fail) {
1250 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1251 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1252 cmpi(scratch, Operand(LAST_NAME_TYPE));
1253 bgt(fail);
1254 }
1255
1256
1257 void MacroAssembler::DebugBreak() {
1258 li(r3, Operand::Zero());
1259 mov(r4,
1260 Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
1261 CEntryStub ces(isolate(), 1);
1262 DCHECK(AllowThisStubCall(&ces));
1263 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
1264 }
1265
1266
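// Pushes a new stack handler: links the current handler (read from
// Isolate::kHandlerAddress) on the stack and makes sp the new current
// handler.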
1267 void MacroAssembler::PushStackHandler() {
1268 // Adjust this code if not the case.
1269 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1270 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1271
1272 // Link the current handler as the next handler.
1273 // Preserve r3-r7.
1274 mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1275 LoadP(r0, MemOperand(r8));
1276 push(r0);
1277
1278 // Set this new handler as the current one.
1279 StoreP(sp, MemOperand(r8));
1280 }
1281
1282
1283 void MacroAssembler::PopStackHandler() {
1284 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1285 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1286
1287 pop(r4);
1288 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1289 StoreP(r4, MemOperand(ip));
1290 }
1291
1292
1293 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1294 Register scratch, Label* miss) {
1295 Label same_contexts;
1296
1297 DCHECK(!holder_reg.is(scratch));
1298 DCHECK(!holder_reg.is(ip));
1299 DCHECK(!scratch.is(ip));
1300
1301 // Load current lexical context from the stack frame.
1302 LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1303 // In debug mode, make sure the lexical context is set.
1304 #ifdef DEBUG
1305 cmpi(scratch, Operand::Zero());
1306 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1307 #endif
1308
1309 // Load the native context of the current context.
1310 LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
1311
1312 // Check the context is a native context.
1313 if (emit_debug_code()) {
1314 // Cannot use ip as a temporary in this verification code, because ip is
1315 // clobbered as part of cmp with an object Operand.
1316 push(holder_reg); // Temporarily save holder on the stack.
1317 // Read the first word and compare to the native_context_map.
1318 LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1319 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1320 cmp(holder_reg, ip);
1321 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1322 pop(holder_reg); // Restore holder.
1323 }
1324
1325 // Check if both contexts are the same.
1326 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1327 cmp(scratch, ip);
1328 beq(&same_contexts);
1329
1330 // Check the context is a native context.
1331 if (emit_debug_code()) {
1332 // Cannot use ip as a temporary in this verification code, because ip is
1333 // clobbered as part of cmp with an object Operand.
1334 push(holder_reg); // Temporarily save holder on the stack.
1335 mr(holder_reg, ip); // Move ip to its holding place.
1336 LoadRoot(ip, Heap::kNullValueRootIndex);
1337 cmp(holder_reg, ip);
1338 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1339
1340 LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1341 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1342 cmp(holder_reg, ip);
1343 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1344 // Restore ip is not needed. ip is reloaded below.
1345 pop(holder_reg); // Restore holder.
1346 // Restore ip to holder's context.
1347 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1348 }
1349
1350 // Check that the security token in the calling global object is
1351 // compatible with the security token in the receiving global
1352 // object.
1353 int token_offset =
1354 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
1355
1356 LoadP(scratch, FieldMemOperand(scratch, token_offset));
1357 LoadP(ip, FieldMemOperand(ip, token_offset));
1358 cmp(scratch, ip);
1359 bne(miss);
1360
1361 bind(&same_contexts);
1362 }
1363
1364
1365 // Compute the hash code from the untagged key. This must be kept in sync with
1366 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1367 // code-stub-hydrogen.cc
1368 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1369 // First of all we assign the hash seed to scratch.
1370 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1371 SmiUntag(scratch);
1372
1373 // Xor original key with a seed.
1374 xor_(t0, t0, scratch);
1375
1376 // Compute the hash code from the untagged key. This must be kept in sync
1377 // with ComputeIntegerHash in utils.h.
1378 //
1379 // hash = ~hash + (hash << 15);
1380 notx(scratch, t0);
1381 slwi(t0, t0, Operand(15));
1382 add(t0, scratch, t0);
1383 // hash = hash ^ (hash >> 12);
1384 srwi(scratch, t0, Operand(12));
1385 xor_(t0, t0, scratch);
1386 // hash = hash + (hash << 2);
1387 slwi(scratch, t0, Operand(2));
1388 add(t0, t0, scratch);
1389 // hash = hash ^ (hash >> 4);
1390 srwi(scratch, t0, Operand(4));
1391 xor_(t0, t0, scratch);
1392 // hash = hash * 2057;
1393 mr(r0, t0);
1394 slwi(scratch, t0, Operand(3));
1395 add(t0, t0, scratch);
1396 slwi(scratch, r0, Operand(11));
1397 add(t0, t0, scratch);
1398 // hash = hash ^ (hash >> 16);
1399 srwi(scratch, t0, Operand(16));
1400 xor_(t0, t0, scratch);
1401 // hash & 0x3fffffff
1402 ExtractBitRange(t0, t0, 29, 0);
1403 }
1404
1405
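// Probes a SeededNumberDictionary for |key| with an unrolled open-addressing
// loop; on success the value is left in |result|, otherwise control
// transfers to |miss|.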
1406 void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
1407 Register key, Register result,
1408 Register t0, Register t1,
1409 Register t2) {
1410 // Register use:
1411 //
1412 // elements - holds the slow-case elements of the receiver on entry.
1413 // Unchanged unless 'result' is the same register.
1414 //
1415 // key - holds the smi key on entry.
1416 // Unchanged unless 'result' is the same register.
1417 //
1418 // result - holds the result on exit if the load succeeded.
1419 // Allowed to be the same as 'key' or 'result'.
1420 // Unchanged on bailout so 'key' or 'result' can be used
1421 // in further computation.
1422 //
1423 // Scratch registers:
1424 //
1425 // t0 - holds the untagged key on entry and holds the hash once computed.
1426 //
1427 // t1 - used to hold the capacity mask of the dictionary
1428 //
1429 // t2 - used for the index into the dictionary.
1430 Label done;
1431
1432 GetNumberHash(t0, t1);
1433
1434 // Compute the capacity mask.
1435 LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1436 SmiUntag(t1);
1437 subi(t1, t1, Operand(1));
1438
1439 // Generate an unrolled loop that performs a few probes before giving up.
1440 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1441 // Use t2 for index calculations and keep the hash intact in t0.
1442 mr(t2, t0);
1443 // Compute the masked index: (hash + i + i * i) & mask.
1444 if (i > 0) {
1445 addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1446 }
1447 and_(t2, t2, t1);
1448
1449 // Scale the index by multiplying by the element size.
1450 DCHECK(SeededNumberDictionary::kEntrySize == 3);
1451 slwi(ip, t2, Operand(1));
1452 add(t2, t2, ip); // t2 = t2 * 3
1453
1454 // Check if the key is identical to the name.
1455 slwi(t2, t2, Operand(kPointerSizeLog2));
1456 add(t2, elements, t2);
1457 LoadP(ip,
1458 FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1459 cmp(key, ip);
1460 if (i != kNumberDictionaryProbes - 1) {
1461 beq(&done);
1462 } else {
1463 bne(miss);
1464 }
1465 }
1466
1467 bind(&done);
1468 // Check that the value is a field property.
1469 // t2: elements + (index * kPointerSize)
1470 const int kDetailsOffset =
1471 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1472 LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
1473 LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
1474 DCHECK_EQ(DATA, 0);
1475 and_(r0, t1, ip, SetRC);
1476 bne(miss, cr0);
1477
1478 // Get the value at the masked, scaled index and return.
1479 const int kValueOffset =
1480 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1481 LoadP(result, FieldMemOperand(t2, kValueOffset));
1482 }
1483
1484
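// Allocates |object_size| bytes (or words if SIZE_IN_WORDS is set) in the
// space selected by |flags|, leaving the (optionally tagged) object in
// |result| and branching to |gc_required| when the space is exhausted.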
1485 void MacroAssembler::Allocate(int object_size, Register result,
1486 Register scratch1, Register scratch2,
1487 Label* gc_required, AllocationFlags flags) {
1488 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1489 if (!FLAG_inline_new) {
1490 if (emit_debug_code()) {
1491 // Trash the registers to simulate an allocation failure.
1492 li(result, Operand(0x7091));
1493 li(scratch1, Operand(0x7191));
1494 li(scratch2, Operand(0x7291));
1495 }
1496 b(gc_required);
1497 return;
1498 }
1499
1500 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1501
1502 // Make object size into bytes.
1503 if ((flags & SIZE_IN_WORDS) != 0) {
1504 object_size *= kPointerSize;
1505 }
1506 DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
1507
1508 // Check relative positions of allocation top and limit addresses.
1509 ExternalReference allocation_top =
1510 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1511 ExternalReference allocation_limit =
1512 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1513
1514 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1515 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1516 DCHECK((limit - top) == kPointerSize);
1517
1518 // Set up allocation top address register.
1519 Register top_address = scratch1;
1520 // This code stores a temporary value in ip. This is OK, as the code below
1521 // does not need ip for implicit literal generation.
1522 Register alloc_limit = ip;
1523 Register result_end = scratch2;
1524 mov(top_address, Operand(allocation_top));
1525
1526 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1527 // Load allocation top into result and allocation limit into ip.
1528 LoadP(result, MemOperand(top_address));
1529 LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1530 } else {
1531 if (emit_debug_code()) {
1532 // Assert that result actually contains top on entry.
1533 LoadP(alloc_limit, MemOperand(top_address));
1534 cmp(result, alloc_limit);
1535 Check(eq, kUnexpectedAllocationTop);
1536 }
1537 // Load allocation limit. Result already contains allocation top.
1538 LoadP(alloc_limit, MemOperand(top_address, limit - top));
1539 }
1540
1541 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1542 // Align the next allocation. Storing the filler map without checking top is
1543 // safe in new-space because the limit of the heap is aligned there.
1544 #if V8_TARGET_ARCH_PPC64
1545 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1546 #else
1547 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1548 andi(result_end, result, Operand(kDoubleAlignmentMask));
1549 Label aligned;
1550 beq(&aligned, cr0);
1551 if ((flags & PRETENURE) != 0) {
1552 cmpl(result, alloc_limit);
1553 bge(gc_required);
1554 }
1555 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1556 stw(result_end, MemOperand(result));
1557 addi(result, result, Operand(kDoubleSize / 2));
1558 bind(&aligned);
1559 #endif
1560 }
1561
1562 // Calculate new top and bail out if new space is exhausted. Use result
1563 // to calculate the new top.
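  // The remaining space (alloc_limit - result) is compared against the
  // requested size; the new top (result + size) is only computed once it is
  // known to fit below the limit.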
1564 sub(r0, alloc_limit, result);
1565 if (is_int16(object_size)) {
1566 cmpi(r0, Operand(object_size));
1567 blt(gc_required);
1568 addi(result_end, result, Operand(object_size));
1569 } else {
1570 Cmpi(r0, Operand(object_size), result_end);
1571 blt(gc_required);
1572 add(result_end, result, result_end);
1573 }
1574 StoreP(result_end, MemOperand(top_address));
1575
1576 // Tag object if requested.
1577 if ((flags & TAG_OBJECT) != 0) {
1578 addi(result, result, Operand(kHeapObjectTag));
1579 }
1580 }
1581
1582
Allocate(Register object_size,Register result,Register result_end,Register scratch,Label * gc_required,AllocationFlags flags)1583 void MacroAssembler::Allocate(Register object_size, Register result,
1584 Register result_end, Register scratch,
1585 Label* gc_required, AllocationFlags flags) {
1586 if (!FLAG_inline_new) {
1587 if (emit_debug_code()) {
1588 // Trash the registers to simulate an allocation failure.
1589 li(result, Operand(0x7091));
1590 li(scratch, Operand(0x7191));
1591 li(result_end, Operand(0x7291));
1592 }
1593 b(gc_required);
1594 return;
1595 }
1596
1597 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1598 // is not specified. Other registers must not overlap.
1599 DCHECK(!AreAliased(object_size, result, scratch, ip));
1600 DCHECK(!AreAliased(result_end, result, scratch, ip));
1601 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1602
1603 // Check relative positions of allocation top and limit addresses.
1604 ExternalReference allocation_top =
1605 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1606 ExternalReference allocation_limit =
1607 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1608 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1609 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1610 DCHECK((limit - top) == kPointerSize);
1611
1612 // Set up allocation top address and allocation limit registers.
1613 Register top_address = scratch;
1614 // This code stores a temporary value in ip. This is OK, as the code below
1615 // does not need ip for implicit literal generation.
1616 Register alloc_limit = ip;
1617 mov(top_address, Operand(allocation_top));
1618
1619 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1620 // Load allocation top into result and allocation limit into alloc_limit.
1621 LoadP(result, MemOperand(top_address));
1622 LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1623 } else {
1624 if (emit_debug_code()) {
1625 // Assert that result actually contains top on entry.
1626 LoadP(alloc_limit, MemOperand(top_address));
1627 cmp(result, alloc_limit);
1628 Check(eq, kUnexpectedAllocationTop);
1629 }
1630 // Load allocation limit. Result already contains allocation top.
1631 LoadP(alloc_limit, MemOperand(top_address, limit - top));
1632 }
1633
1634 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1635 // Align the next allocation. Storing the filler map without checking top is
1636 // safe in new-space because the limit of the heap is aligned there.
1637 #if V8_TARGET_ARCH_PPC64
1638 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1639 #else
1640 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1641 andi(result_end, result, Operand(kDoubleAlignmentMask));
1642 Label aligned;
1643 beq(&aligned, cr0);
1644 if ((flags & PRETENURE) != 0) {
1645 cmpl(result, alloc_limit);
1646 bge(gc_required);
1647 }
1648 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1649 stw(result_end, MemOperand(result));
1650 addi(result, result, Operand(kDoubleSize / 2));
1651 bind(&aligned);
1652 #endif
1653 }
1654
1655 // Calculate new top and bail out if new space is exhausted. Use result
1656 // to calculate the new top. Object size may be in words so a shift is
1657 // required to get the number of bytes.
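  // As in the fixed-size Allocate above, the remaining space
  // (alloc_limit - result) is compared against the byte size before the new
  // top is computed.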
1658 sub(r0, alloc_limit, result);
1659 if ((flags & SIZE_IN_WORDS) != 0) {
1660 ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
1661 cmp(r0, result_end);
1662 blt(gc_required);
1663 add(result_end, result, result_end);
1664 } else {
1665 cmp(r0, object_size);
1666 blt(gc_required);
1667 add(result_end, result, object_size);
1668 }
1669
1670 // Update allocation top. result temporarily holds the new top.
1671 if (emit_debug_code()) {
1672 andi(r0, result_end, Operand(kObjectAlignmentMask));
1673 Check(eq, kUnalignedAllocationInNewSpace, cr0);
1674 }
1675 StoreP(result_end, MemOperand(top_address));
1676
1677 // Tag object if requested.
1678 if ((flags & TAG_OBJECT) != 0) {
1679 addi(result, result, Operand(kHeapObjectTag));
1680 }
1681 }
1682
1683
AllocateTwoByteString(Register result,Register length,Register scratch1,Register scratch2,Register scratch3,Label * gc_required)1684 void MacroAssembler::AllocateTwoByteString(Register result, Register length,
1685 Register scratch1, Register scratch2,
1686 Register scratch3,
1687 Label* gc_required) {
1688 // Calculate the number of bytes needed for the characters in the string while
1689 // observing object alignment.
1690 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
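  // size = RoundUp(length * 2 + SeqTwoByteString::kHeaderSize, kObjectAlignment),
  // computed by adding kObjectAlignmentMask and then clearing the low bits.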
1691 slwi(scratch1, length, Operand(1)); // Length in bytes, not chars.
1692 addi(scratch1, scratch1,
1693 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1694 mov(r0, Operand(~kObjectAlignmentMask));
1695 and_(scratch1, scratch1, r0);
1696
1697 // Allocate two-byte string in new space.
1698 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1699
1700 // Set the map, length and hash field.
1701 InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
1702 scratch2);
1703 }
1704
1705
AllocateOneByteString(Register result,Register length,Register scratch1,Register scratch2,Register scratch3,Label * gc_required)1706 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1707 Register scratch1, Register scratch2,
1708 Register scratch3,
1709 Label* gc_required) {
1710 // Calculate the number of bytes needed for the characters in the string while
1711 // observing object alignment.
1712 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1713 DCHECK(kCharSize == 1);
1714 addi(scratch1, length,
1715 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1716 li(r0, Operand(~kObjectAlignmentMask));
1717 and_(scratch1, scratch1, r0);
1718
1719 // Allocate one-byte string in new space.
1720 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1721
1722 // Set the map, length and hash field.
1723 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
1724 scratch1, scratch2);
1725 }
1726
1727
AllocateTwoByteConsString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)1728 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
1729 Register scratch1,
1730 Register scratch2,
1731 Label* gc_required) {
1732 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1733 TAG_OBJECT);
1734
1735 InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
1736 scratch2);
1737 }
1738
1739
AllocateOneByteConsString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)1740 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
1741 Register scratch1,
1742 Register scratch2,
1743 Label* gc_required) {
1744 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1745 TAG_OBJECT);
1746
1747 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
1748 scratch1, scratch2);
1749 }
1750
1751
AllocateTwoByteSlicedString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)1752 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1753 Register length,
1754 Register scratch1,
1755 Register scratch2,
1756 Label* gc_required) {
1757 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1758 TAG_OBJECT);
1759
1760 InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
1761 scratch2);
1762 }
1763
1764
AllocateOneByteSlicedString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)1765 void MacroAssembler::AllocateOneByteSlicedString(Register result,
1766 Register length,
1767 Register scratch1,
1768 Register scratch2,
1769 Label* gc_required) {
1770 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1771 TAG_OBJECT);
1772
1773 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
1774 scratch1, scratch2);
1775 }
1776
1777
CompareObjectType(Register object,Register map,Register type_reg,InstanceType type)1778 void MacroAssembler::CompareObjectType(Register object, Register map,
1779 Register type_reg, InstanceType type) {
1780 const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
1781
1782 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1783 CompareInstanceType(map, temp, type);
1784 }
1785
1786
CompareInstanceType(Register map,Register type_reg,InstanceType type)1787 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1788 InstanceType type) {
1789 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1790 STATIC_ASSERT(LAST_TYPE < 256);
1791 lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1792 cmpi(type_reg, Operand(type));
1793 }
1794
1795
CompareRoot(Register obj,Heap::RootListIndex index)1796 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
1797 DCHECK(!obj.is(r0));
1798 LoadRoot(r0, index);
1799 cmp(obj, r0);
1800 }
1801
1802
CheckFastElements(Register map,Register scratch,Label * fail)1803 void MacroAssembler::CheckFastElements(Register map, Register scratch,
1804 Label* fail) {
1805 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1806 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1807 STATIC_ASSERT(FAST_ELEMENTS == 2);
1808 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1809 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1810 STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
1811 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1812 bgt(fail);
1813 }
1814
1815
CheckFastObjectElements(Register map,Register scratch,Label * fail)1816 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
1817 Label* fail) {
1818 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1819 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1820 STATIC_ASSERT(FAST_ELEMENTS == 2);
1821 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1822 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1823 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1824 ble(fail);
1825 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1826 bgt(fail);
1827 }
1828
1829
CheckFastSmiElements(Register map,Register scratch,Label * fail)1830 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
1831 Label* fail) {
1832 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1833 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1834 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1835 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1836 bgt(fail);
1837 }
1838
1839
StoreNumberToDoubleElements(Register value_reg,Register key_reg,Register elements_reg,Register scratch1,DoubleRegister double_scratch,Label * fail,int elements_offset)1840 void MacroAssembler::StoreNumberToDoubleElements(
1841 Register value_reg, Register key_reg, Register elements_reg,
1842 Register scratch1, DoubleRegister double_scratch, Label* fail,
1843 int elements_offset) {
1844 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
1845 Label smi_value, store;
1846
1847 // Handle smi values specially.
1848 JumpIfSmi(value_reg, &smi_value);
1849
1850 // Ensure that the object is a heap number
1851 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
1852 DONT_DO_SMI_CHECK);
1853
1854 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
1855 // Double value, turn potential sNaN into qNaN.
1856 CanonicalizeNaN(double_scratch);
1857 b(&store);
1858
1859 bind(&smi_value);
1860 SmiToDouble(double_scratch, value_reg);
1861
1862 bind(&store);
1863 SmiToDoubleArrayOffset(scratch1, key_reg);
1864 add(scratch1, elements_reg, scratch1);
1865 stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
1866 elements_offset));
1867 }
1868
1869
AddAndCheckForOverflow(Register dst,Register left,Register right,Register overflow_dst,Register scratch)1870 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1871 Register right,
1872 Register overflow_dst,
1873 Register scratch) {
1874 DCHECK(!dst.is(overflow_dst));
1875 DCHECK(!dst.is(scratch));
1876 DCHECK(!overflow_dst.is(scratch));
1877 DCHECK(!overflow_dst.is(left));
1878 DCHECK(!overflow_dst.is(right));
1879
1880 bool left_is_right = left.is(right);
1881 RCBit xorRC = left_is_right ? SetRC : LeaveRC;
1882
1883 // C = A+B; C overflows if A/B have same sign and C has diff sign than A
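  // overflow_dst = (dst ^ left) & (dst ^ right) has its sign bit set exactly
  // when dst's sign differs from the operands' shared sign, i.e. on signed
  // overflow; SetRC copies that sign bit into cr0 for the caller to test.
  // When left == right a single XOR suffices: doubling overflows iff the
  // sign flips.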
1884 if (dst.is(left)) {
1885 mr(scratch, left); // Preserve left.
1886 add(dst, left, right); // Left is overwritten.
1887 xor_(overflow_dst, dst, scratch, xorRC); // Original left.
1888 if (!left_is_right) xor_(scratch, dst, right);
1889 } else if (dst.is(right)) {
1890 mr(scratch, right); // Preserve right.
1891 add(dst, left, right); // Right is overwritten.
1892 xor_(overflow_dst, dst, left, xorRC);
1893 if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
1894 } else {
1895 add(dst, left, right);
1896 xor_(overflow_dst, dst, left, xorRC);
1897 if (!left_is_right) xor_(scratch, dst, right);
1898 }
1899 if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
1900 }
1901
1902
AddAndCheckForOverflow(Register dst,Register left,intptr_t right,Register overflow_dst,Register scratch)1903 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1904 intptr_t right,
1905 Register overflow_dst,
1906 Register scratch) {
1907 Register original_left = left;
1908 DCHECK(!dst.is(overflow_dst));
1909 DCHECK(!dst.is(scratch));
1910 DCHECK(!overflow_dst.is(scratch));
1911 DCHECK(!overflow_dst.is(left));
1912
1913 // C = A+B; C overflows if A/B have same sign and C has diff sign than A
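  // With an immediate operand only one direction of overflow is possible:
  // for right >= 0 overflow means left >= 0 and dst < 0, detected by
  // (dst ^ left) & dst; for right < 0 it means left < 0 and dst >= 0,
  // detected by (dst ^ left) & ~dst (the andc below).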
1914 if (dst.is(left)) {
1915 // Preserve left.
1916 original_left = overflow_dst;
1917 mr(original_left, left);
1918 }
1919 Add(dst, left, right, scratch);
1920 xor_(overflow_dst, dst, original_left);
1921 if (right >= 0) {
1922 and_(overflow_dst, overflow_dst, dst, SetRC);
1923 } else {
1924 andc(overflow_dst, overflow_dst, dst, SetRC);
1925 }
1926 }
1927
1928
SubAndCheckForOverflow(Register dst,Register left,Register right,Register overflow_dst,Register scratch)1929 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
1930 Register right,
1931 Register overflow_dst,
1932 Register scratch) {
1933 DCHECK(!dst.is(overflow_dst));
1934 DCHECK(!dst.is(scratch));
1935 DCHECK(!overflow_dst.is(scratch));
1936 DCHECK(!overflow_dst.is(left));
1937 DCHECK(!overflow_dst.is(right));
1938
1939 // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
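  // overflow_dst = (dst ^ left) & (left ^ right): the sign bit is set exactly
  // when the operands have different signs and the result's sign differs from
  // left's, which is the signed-subtraction overflow condition.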
1940 if (dst.is(left)) {
1941 mr(scratch, left); // Preserve left.
1942 sub(dst, left, right); // Left is overwritten.
1943 xor_(overflow_dst, dst, scratch);
1944 xor_(scratch, scratch, right);
1945 and_(overflow_dst, overflow_dst, scratch, SetRC);
1946 } else if (dst.is(right)) {
1947 mr(scratch, right); // Preserve right.
1948 sub(dst, left, right); // Right is overwritten.
1949 xor_(overflow_dst, dst, left);
1950 xor_(scratch, left, scratch);
1951 and_(overflow_dst, overflow_dst, scratch, SetRC);
1952 } else {
1953 sub(dst, left, right);
1954 xor_(overflow_dst, dst, left);
1955 xor_(scratch, left, right);
1956 and_(overflow_dst, scratch, overflow_dst, SetRC);
1957 }
1958 }
1959
1960
CompareMap(Register obj,Register scratch,Handle<Map> map,Label * early_success)1961 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
1962 Label* early_success) {
1963 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1964 CompareMap(scratch, map, early_success);
1965 }
1966
1967
CompareMap(Register obj_map,Handle<Map> map,Label * early_success)1968 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
1969 Label* early_success) {
1970 mov(r0, Operand(map));
1971 cmp(obj_map, r0);
1972 }
1973
1974
CheckMap(Register obj,Register scratch,Handle<Map> map,Label * fail,SmiCheckType smi_check_type)1975 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
1976 Label* fail, SmiCheckType smi_check_type) {
1977 if (smi_check_type == DO_SMI_CHECK) {
1978 JumpIfSmi(obj, fail);
1979 }
1980
1981 Label success;
1982 CompareMap(obj, scratch, map, &success);
1983 bne(fail);
1984 bind(&success);
1985 }
1986
1987
CheckMap(Register obj,Register scratch,Heap::RootListIndex index,Label * fail,SmiCheckType smi_check_type)1988 void MacroAssembler::CheckMap(Register obj, Register scratch,
1989 Heap::RootListIndex index, Label* fail,
1990 SmiCheckType smi_check_type) {
1991 if (smi_check_type == DO_SMI_CHECK) {
1992 JumpIfSmi(obj, fail);
1993 }
1994 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1995 LoadRoot(r0, index);
1996 cmp(scratch, r0);
1997 bne(fail);
1998 }
1999
2000
DispatchWeakMap(Register obj,Register scratch1,Register scratch2,Handle<WeakCell> cell,Handle<Code> success,SmiCheckType smi_check_type)2001 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2002 Register scratch2, Handle<WeakCell> cell,
2003 Handle<Code> success,
2004 SmiCheckType smi_check_type) {
2005 Label fail;
2006 if (smi_check_type == DO_SMI_CHECK) {
2007 JumpIfSmi(obj, &fail);
2008 }
2009 LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2010 CmpWeakValue(scratch1, cell, scratch2);
2011 Jump(success, RelocInfo::CODE_TARGET, eq);
2012 bind(&fail);
2013 }
2014
2015
CmpWeakValue(Register value,Handle<WeakCell> cell,Register scratch,CRegister cr)2016 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2017 Register scratch, CRegister cr) {
2018 mov(scratch, Operand(cell));
2019 LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2020 cmp(value, scratch, cr);
2021 }
2022
2023
GetWeakValue(Register value,Handle<WeakCell> cell)2024 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2025 mov(value, Operand(cell));
2026 LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
2027 }
2028
2029
LoadWeakValue(Register value,Handle<WeakCell> cell,Label * miss)2030 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2031 Label* miss) {
2032 GetWeakValue(value, cell);
2033 JumpIfSmi(value, miss);
2034 }
2035
2036
GetMapConstructor(Register result,Register map,Register temp,Register temp2)2037 void MacroAssembler::GetMapConstructor(Register result, Register map,
2038 Register temp, Register temp2) {
2039 Label done, loop;
2040 LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2041 bind(&loop);
2042 JumpIfSmi(result, &done);
2043 CompareObjectType(result, temp, temp2, MAP_TYPE);
2044 bne(&done);
2045 LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2046 b(&loop);
2047 bind(&done);
2048 }
2049
2050
TryGetFunctionPrototype(Register function,Register result,Register scratch,Label * miss)2051 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2052 Register scratch, Label* miss) {
2053 // Get the prototype or initial map from the function.
2054 LoadP(result,
2055 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2056
2057 // If the prototype or initial map is the hole, don't return it and
2058 // simply miss the cache instead. This will allow us to allocate a
2059 // prototype object on-demand in the runtime system.
2060 LoadRoot(r0, Heap::kTheHoleValueRootIndex);
2061 cmp(result, r0);
2062 beq(miss);
2063
2064 // If the function does not have an initial map, we're done.
2065 Label done;
2066 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2067 bne(&done);
2068
2069 // Get the prototype from the initial map.
2070 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2071
2072 // All done.
2073 bind(&done);
2074 }
2075
2076
CallStub(CodeStub * stub,TypeFeedbackId ast_id,Condition cond)2077 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
2078 Condition cond) {
2079 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2080 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2081 }
2082
2083
TailCallStub(CodeStub * stub,Condition cond)2084 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2085 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2086 }
2087
2088
AllowThisStubCall(CodeStub * stub)2089 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2090 return has_frame_ || !stub->SometimesSetsUpAFrame();
2091 }
2092
2093
IndexFromHash(Register hash,Register index)2094 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2095 // If the hash field contains an array index, pick it out. The assert checks
2096 // that the constants for the maximum number of digits for an array index
2097 // cached in the hash field and the number of bits reserved for it do not
2098 // conflict.
2099 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2100 (1 << String::kArrayIndexValueBits));
2101 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2102 }
2103
2104
SmiToDouble(DoubleRegister value,Register smi)2105 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
2106 SmiUntag(ip, smi);
2107 ConvertIntToDouble(ip, value);
2108 }
2109
2110
TestDoubleIsInt32(DoubleRegister double_input,Register scratch1,Register scratch2,DoubleRegister double_scratch)2111 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
2112 Register scratch1, Register scratch2,
2113 DoubleRegister double_scratch) {
2114 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
2115 }
2116
2117
TryDoubleToInt32Exact(Register result,DoubleRegister double_input,Register scratch,DoubleRegister double_scratch)2118 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2119 DoubleRegister double_input,
2120 Register scratch,
2121 DoubleRegister double_scratch) {
2122 Label done;
2123 DCHECK(!double_input.is(double_scratch));
2124
2125 ConvertDoubleToInt64(double_input,
2126 #if !V8_TARGET_ARCH_PPC64
2127 scratch,
2128 #endif
2129 result, double_scratch);
2130
2131 #if V8_TARGET_ARCH_PPC64
2132 TestIfInt32(result, r0);
2133 #else
2134 TestIfInt32(scratch, result, r0);
2135 #endif
2136 bne(&done);
2137
2138 // Convert back and compare; equality indicates the conversion was exact.
2139 fcfid(double_scratch, double_scratch);
2140 fcmpu(double_scratch, double_input);
2141 bind(&done);
2142 }
2143
2144
TryInt32Floor(Register result,DoubleRegister double_input,Register input_high,Register scratch,DoubleRegister double_scratch,Label * done,Label * exact)2145 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
2146 Register input_high, Register scratch,
2147 DoubleRegister double_scratch, Label* done,
2148 Label* exact) {
2149 DCHECK(!result.is(input_high));
2150 DCHECK(!double_input.is(double_scratch));
2151 Label exception;
2152
2153 MovDoubleHighToInt(input_high, double_input);
2154
2155 // Test for NaN/Inf
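  // An IEEE 754 double has an 11-bit biased exponent; the all-ones pattern
  // (0x7ff) is reserved for NaN and infinity, which cannot be floored to an
  // int32.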
2156 ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
2157 cmpli(result, Operand(0x7ff));
2158 beq(&exception);
2159
2160 // Convert (rounding to -Inf)
2161 ConvertDoubleToInt64(double_input,
2162 #if !V8_TARGET_ARCH_PPC64
2163 scratch,
2164 #endif
2165 result, double_scratch, kRoundToMinusInf);
2166
2167 // Test for overflow
2168 #if V8_TARGET_ARCH_PPC64
2169 TestIfInt32(result, r0);
2170 #else
2171 TestIfInt32(scratch, result, r0);
2172 #endif
2173 bne(&exception);
2174
2175 // Test for exactness
2176 fcfid(double_scratch, double_scratch);
2177 fcmpu(double_scratch, double_input);
2178 beq(exact);
2179 b(done);
2180
2181 bind(&exception);
2182 }
2183
2184
TryInlineTruncateDoubleToI(Register result,DoubleRegister double_input,Label * done)2185 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2186 DoubleRegister double_input,
2187 Label* done) {
2188 DoubleRegister double_scratch = kScratchDoubleReg;
2189 #if !V8_TARGET_ARCH_PPC64
2190 Register scratch = ip;
2191 #endif
2192
2193 ConvertDoubleToInt64(double_input,
2194 #if !V8_TARGET_ARCH_PPC64
2195 scratch,
2196 #endif
2197 result, double_scratch);
2198
2199 // Test for overflow
2200 #if V8_TARGET_ARCH_PPC64
2201 TestIfInt32(result, r0);
2202 #else
2203 TestIfInt32(scratch, result, r0);
2204 #endif
2205 beq(done);
2206 }
2207
2208
TruncateDoubleToI(Register result,DoubleRegister double_input)2209 void MacroAssembler::TruncateDoubleToI(Register result,
2210 DoubleRegister double_input) {
2211 Label done;
2212
2213 TryInlineTruncateDoubleToI(result, double_input, &done);
2214
2215 // If we fell through, the inline version didn't succeed, so call the stub instead.
2216 mflr(r0);
2217 push(r0);
2218 // Put input on stack.
2219 stfdu(double_input, MemOperand(sp, -kDoubleSize));
2220
2221 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2222 CallStub(&stub);
2223
2224 addi(sp, sp, Operand(kDoubleSize));
2225 pop(r0);
2226 mtlr(r0);
2227
2228 bind(&done);
2229 }
2230
2231
TruncateHeapNumberToI(Register result,Register object)2232 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2233 Label done;
2234 DoubleRegister double_scratch = kScratchDoubleReg;
2235 DCHECK(!result.is(object));
2236
2237 lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2238 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2239
2240 // If we fell through, the inline version didn't succeed, so call the stub instead.
2241 mflr(r0);
2242 push(r0);
2243 DoubleToIStub stub(isolate(), object, result,
2244 HeapNumber::kValueOffset - kHeapObjectTag, true, true);
2245 CallStub(&stub);
2246 pop(r0);
2247 mtlr(r0);
2248
2249 bind(&done);
2250 }
2251
2252
TruncateNumberToI(Register object,Register result,Register heap_number_map,Register scratch1,Label * not_number)2253 void MacroAssembler::TruncateNumberToI(Register object, Register result,
2254 Register heap_number_map,
2255 Register scratch1, Label* not_number) {
2256 Label done;
2257 DCHECK(!result.is(object));
2258
2259 UntagAndJumpIfSmi(result, object, &done);
2260 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2261 TruncateHeapNumberToI(result, object);
2262
2263 bind(&done);
2264 }
2265
2266
GetLeastBitsFromSmi(Register dst,Register src,int num_least_bits)2267 void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
2268 int num_least_bits) {
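  // A single rotate-and-clear in effect shifts the smi tag out and masks off
  // all but the low num_least_bits of the untagged value.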
2269 #if V8_TARGET_ARCH_PPC64
2270 rldicl(dst, src, kBitsPerPointer - kSmiShift,
2271 kBitsPerPointer - num_least_bits);
2272 #else
2273 rlwinm(dst, src, kBitsPerPointer - kSmiShift,
2274 kBitsPerPointer - num_least_bits, 31);
2275 #endif
2276 }
2277
2278
GetLeastBitsFromInt32(Register dst,Register src,int num_least_bits)2279 void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
2280 int num_least_bits) {
2281 rlwinm(dst, src, 0, 32 - num_least_bits, 31);
2282 }
2283
2284
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles)2285 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
2286 SaveFPRegsMode save_doubles) {
2287 // All parameters are on the stack. r3 has the return value after the call.
2288
2289 // If the expected number of arguments of the runtime function is
2290 // constant, we check that the actual number of arguments match the
2291 // expectation.
2292 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2293
2294 // TODO(1236192): Most runtime routines don't need the number of
2295 // arguments passed in because it is constant. At some point we
2296 // should remove this need and make the runtime routine entry code
2297 // smarter.
2298 mov(r3, Operand(num_arguments));
2299 mov(r4, Operand(ExternalReference(f, isolate())));
2300 CEntryStub stub(isolate(),
2301 #if V8_TARGET_ARCH_PPC64
2302 f->result_size,
2303 #else
2304 1,
2305 #endif
2306 save_doubles);
2307 CallStub(&stub);
2308 }
2309
2310
CallExternalReference(const ExternalReference & ext,int num_arguments)2311 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2312 int num_arguments) {
2313 mov(r3, Operand(num_arguments));
2314 mov(r4, Operand(ext));
2315
2316 CEntryStub stub(isolate(), 1);
2317 CallStub(&stub);
2318 }
2319
2320
TailCallRuntime(Runtime::FunctionId fid)2321 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2322 const Runtime::Function* function = Runtime::FunctionForId(fid);
2323 DCHECK_EQ(1, function->result_size);
2324 if (function->nargs >= 0) {
2325 mov(r3, Operand(function->nargs));
2326 }
2327 JumpToExternalReference(ExternalReference(fid, isolate()));
2328 }
2329
2330
JumpToExternalReference(const ExternalReference & builtin)2331 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2332 mov(r4, Operand(builtin));
2333 CEntryStub stub(isolate(), 1);
2334 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2335 }
2336
2337
InvokeBuiltin(int native_context_index,InvokeFlag flag,const CallWrapper & call_wrapper)2338 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
2339 const CallWrapper& call_wrapper) {
2340 // You can't call a builtin without a valid frame.
2341 DCHECK(flag == JUMP_FUNCTION || has_frame());
2342
2343 // Fake a parameter count to avoid emitting code to do the check.
2344 ParameterCount expected(0);
2345 LoadNativeContextSlot(native_context_index, r4);
2346 InvokeFunctionCode(r4, no_reg, expected, expected, flag, call_wrapper);
2347 }
2348
2349
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2350 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2351 Register scratch1, Register scratch2) {
2352 if (FLAG_native_code_counters && counter->Enabled()) {
2353 mov(scratch1, Operand(value));
2354 mov(scratch2, Operand(ExternalReference(counter)));
2355 stw(scratch1, MemOperand(scratch2));
2356 }
2357 }
2358
2359
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2360 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2361 Register scratch1, Register scratch2) {
2362 DCHECK(value > 0);
2363 if (FLAG_native_code_counters && counter->Enabled()) {
2364 mov(scratch2, Operand(ExternalReference(counter)));
2365 lwz(scratch1, MemOperand(scratch2));
2366 addi(scratch1, scratch1, Operand(value));
2367 stw(scratch1, MemOperand(scratch2));
2368 }
2369 }
2370
2371
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2372 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2373 Register scratch1, Register scratch2) {
2374 DCHECK(value > 0);
2375 if (FLAG_native_code_counters && counter->Enabled()) {
2376 mov(scratch2, Operand(ExternalReference(counter)));
2377 lwz(scratch1, MemOperand(scratch2));
2378 subi(scratch1, scratch1, Operand(value));
2379 stw(scratch1, MemOperand(scratch2));
2380 }
2381 }
2382
2383
Assert(Condition cond,BailoutReason reason,CRegister cr)2384 void MacroAssembler::Assert(Condition cond, BailoutReason reason,
2385 CRegister cr) {
2386 if (emit_debug_code()) Check(cond, reason, cr);
2387 }
2388
2389
AssertFastElements(Register elements)2390 void MacroAssembler::AssertFastElements(Register elements) {
2391 if (emit_debug_code()) {
2392 DCHECK(!elements.is(r0));
2393 Label ok;
2394 push(elements);
2395 LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2396 LoadRoot(r0, Heap::kFixedArrayMapRootIndex);
2397 cmp(elements, r0);
2398 beq(&ok);
2399 LoadRoot(r0, Heap::kFixedDoubleArrayMapRootIndex);
2400 cmp(elements, r0);
2401 beq(&ok);
2402 LoadRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
2403 cmp(elements, r0);
2404 beq(&ok);
2405 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2406 bind(&ok);
2407 pop(elements);
2408 }
2409 }
2410
2411
Check(Condition cond,BailoutReason reason,CRegister cr)2412 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
2413 Label L;
2414 b(cond, &L, cr);
2415 Abort(reason);
2416 // will not return here
2417 bind(&L);
2418 }
2419
2420
Abort(BailoutReason reason)2421 void MacroAssembler::Abort(BailoutReason reason) {
2422 Label abort_start;
2423 bind(&abort_start);
2424 #ifdef DEBUG
2425 const char* msg = GetBailoutReason(reason);
2426 if (msg != NULL) {
2427 RecordComment("Abort message: ");
2428 RecordComment(msg);
2429 }
2430
2431 if (FLAG_trap_on_abort) {
2432 stop(msg);
2433 return;
2434 }
2435 #endif
2436
2437 LoadSmiLiteral(r0, Smi::FromInt(reason));
2438 push(r0);
2439 // Disable stub call restrictions to always allow calls to abort.
2440 if (!has_frame_) {
2441 // We don't actually want to generate a pile of code for this, so just
2442 // claim there is a stack frame, without generating one.
2443 FrameScope scope(this, StackFrame::NONE);
2444 CallRuntime(Runtime::kAbort, 1);
2445 } else {
2446 CallRuntime(Runtime::kAbort, 1);
2447 }
2448 // will not return here
2449 }
2450
2451
LoadContext(Register dst,int context_chain_length)2452 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2453 if (context_chain_length > 0) {
2454 // Move up the chain of contexts to the context containing the slot.
2455 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2456 for (int i = 1; i < context_chain_length; i++) {
2457 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2458 }
2459 } else {
2460 // Slot is in the current function context. Move it into the
2461 // destination register in case we store into it (the write barrier
2462 // cannot be allowed to destroy the context register cp).
2463 mr(dst, cp);
2464 }
2465 }
2466
2467
LoadTransitionedArrayMapConditional(ElementsKind expected_kind,ElementsKind transitioned_kind,Register map_in_out,Register scratch,Label * no_map_match)2468 void MacroAssembler::LoadTransitionedArrayMapConditional(
2469 ElementsKind expected_kind, ElementsKind transitioned_kind,
2470 Register map_in_out, Register scratch, Label* no_map_match) {
2471 DCHECK(IsFastElementsKind(expected_kind));
2472 DCHECK(IsFastElementsKind(transitioned_kind));
2473
2474 // Check that the function's map is the same as the expected cached map.
2475 LoadP(scratch, NativeContextMemOperand());
2476 LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
2477 cmp(map_in_out, ip);
2478 bne(no_map_match);
2479
2480 // Use the transitioned cached map.
2481 LoadP(map_in_out,
2482 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
2483 }
2484
2485
LoadNativeContextSlot(int index,Register dst)2486 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2487 LoadP(dst, NativeContextMemOperand());
2488 LoadP(dst, ContextMemOperand(dst, index));
2489 }
2490
2491
LoadGlobalFunctionInitialMap(Register function,Register map,Register scratch)2492 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2493 Register map,
2494 Register scratch) {
2495 // Load the initial map. The global functions all have initial maps.
2496 LoadP(map,
2497 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2498 if (emit_debug_code()) {
2499 Label ok, fail;
2500 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2501 b(&ok);
2502 bind(&fail);
2503 Abort(kGlobalFunctionsMustHaveInitialMap);
2504 bind(&ok);
2505 }
2506 }
2507
2508
JumpIfNotPowerOfTwoOrZero(Register reg,Register scratch,Label * not_power_of_two_or_zero)2509 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2510 Register reg, Register scratch, Label* not_power_of_two_or_zero) {
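  // Classic power-of-two test: for reg > 0, reg is a power of two iff
  // (reg & (reg - 1)) == 0. The first branch filters out reg <= 0.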
2511 subi(scratch, reg, Operand(1));
2512 cmpi(scratch, Operand::Zero());
2513 blt(not_power_of_two_or_zero);
2514 and_(r0, scratch, reg, SetRC);
2515 bne(not_power_of_two_or_zero, cr0);
2516 }
2517
2518
JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,Register scratch,Label * zero_and_neg,Label * not_power_of_two)2519 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
2520 Register scratch,
2521 Label* zero_and_neg,
2522 Label* not_power_of_two) {
2523 subi(scratch, reg, Operand(1));
2524 cmpi(scratch, Operand::Zero());
2525 blt(zero_and_neg);
2526 and_(r0, scratch, reg, SetRC);
2527 bne(not_power_of_two, cr0);
2528 }
2529
2530 #if !V8_TARGET_ARCH_PPC64
SmiTagCheckOverflow(Register reg,Register overflow)2531 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
2532 DCHECK(!reg.is(overflow));
2533 mr(overflow, reg); // Save original value.
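  // On 32-bit targets SmiTag is a 1-bit left shift, so the tagged value is
  // 2 * value; value ^ (2 * value) has its sign bit set exactly when the
  // shift changed the sign, i.e. when the value does not fit in a smi.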
2534 SmiTag(reg);
2535 xor_(overflow, overflow, reg, SetRC); // Overflow if (value ^ 2 * value) < 0.
2536 }
2537
2538
SmiTagCheckOverflow(Register dst,Register src,Register overflow)2539 void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
2540 Register overflow) {
2541 if (dst.is(src)) {
2542 // Fall back to slower case.
2543 SmiTagCheckOverflow(dst, overflow);
2544 } else {
2545 DCHECK(!dst.is(src));
2546 DCHECK(!dst.is(overflow));
2547 DCHECK(!src.is(overflow));
2548 SmiTag(dst, src);
2549 xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0.
2550 }
2551 }
2552 #endif
2553
JumpIfNotBothSmi(Register reg1,Register reg2,Label * on_not_both_smi)2554 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2555 Label* on_not_both_smi) {
2556 STATIC_ASSERT(kSmiTag == 0);
2557 orx(r0, reg1, reg2, LeaveRC);
2558 JumpIfNotSmi(r0, on_not_both_smi);
2559 }
2560
2561
UntagAndJumpIfSmi(Register dst,Register src,Label * smi_case)2562 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2563 Label* smi_case) {
2564 STATIC_ASSERT(kSmiTag == 0);
2565 TestBitRange(src, kSmiTagSize - 1, 0, r0);
2566 SmiUntag(dst, src);
2567 beq(smi_case, cr0);
2568 }
2569
2570
UntagAndJumpIfNotSmi(Register dst,Register src,Label * non_smi_case)2571 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
2572 Label* non_smi_case) {
2573 STATIC_ASSERT(kSmiTag == 0);
2574 TestBitRange(src, kSmiTagSize - 1, 0, r0);
2575 SmiUntag(dst, src);
2576 bne(non_smi_case, cr0);
2577 }
2578
2579
JumpIfEitherSmi(Register reg1,Register reg2,Label * on_either_smi)2580 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2581 Label* on_either_smi) {
2582 STATIC_ASSERT(kSmiTag == 0);
2583 JumpIfSmi(reg1, on_either_smi);
2584 JumpIfSmi(reg2, on_either_smi);
2585 }
2586
2587
AssertNotSmi(Register object)2588 void MacroAssembler::AssertNotSmi(Register object) {
2589 if (emit_debug_code()) {
2590 STATIC_ASSERT(kSmiTag == 0);
2591 TestIfSmi(object, r0);
2592 Check(ne, kOperandIsASmi, cr0);
2593 }
2594 }
2595
2596
AssertSmi(Register object)2597 void MacroAssembler::AssertSmi(Register object) {
2598 if (emit_debug_code()) {
2599 STATIC_ASSERT(kSmiTag == 0);
2600 TestIfSmi(object, r0);
2601 Check(eq, kOperandIsNotSmi, cr0);
2602 }
2603 }
2604
2605
AssertString(Register object)2606 void MacroAssembler::AssertString(Register object) {
2607 if (emit_debug_code()) {
2608 STATIC_ASSERT(kSmiTag == 0);
2609 TestIfSmi(object, r0);
2610 Check(ne, kOperandIsASmiAndNotAString, cr0);
2611 push(object);
2612 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2613 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2614 pop(object);
2615 Check(lt, kOperandIsNotAString);
2616 }
2617 }
2618
2619
AssertName(Register object)2620 void MacroAssembler::AssertName(Register object) {
2621 if (emit_debug_code()) {
2622 STATIC_ASSERT(kSmiTag == 0);
2623 TestIfSmi(object, r0);
2624 Check(ne, kOperandIsASmiAndNotAName, cr0);
2625 push(object);
2626 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2627 CompareInstanceType(object, object, LAST_NAME_TYPE);
2628 pop(object);
2629 Check(le, kOperandIsNotAName);
2630 }
2631 }
2632
2633
AssertFunction(Register object)2634 void MacroAssembler::AssertFunction(Register object) {
2635 if (emit_debug_code()) {
2636 STATIC_ASSERT(kSmiTag == 0);
2637 TestIfSmi(object, r0);
2638 Check(ne, kOperandIsASmiAndNotAFunction, cr0);
2639 push(object);
2640 CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
2641 pop(object);
2642 Check(eq, kOperandIsNotAFunction);
2643 }
2644 }
2645
2646
AssertBoundFunction(Register object)2647 void MacroAssembler::AssertBoundFunction(Register object) {
2648 if (emit_debug_code()) {
2649 STATIC_ASSERT(kSmiTag == 0);
2650 TestIfSmi(object, r0);
2651 Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
2652 push(object);
2653 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2654 pop(object);
2655 Check(eq, kOperandIsNotABoundFunction);
2656 }
2657 }
2658
2659
AssertUndefinedOrAllocationSite(Register object,Register scratch)2660 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2661 Register scratch) {
2662 if (emit_debug_code()) {
2663 Label done_checking;
2664 AssertNotSmi(object);
2665 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2666 beq(&done_checking);
2667 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2668 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2669 Assert(eq, kExpectedUndefinedOrCell);
2670 bind(&done_checking);
2671 }
2672 }
2673
2674
AssertIsRoot(Register reg,Heap::RootListIndex index)2675 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2676 if (emit_debug_code()) {
2677 CompareRoot(reg, index);
2678 Check(eq, kHeapNumberMapRegisterClobbered);
2679 }
2680 }
2681
2682
JumpIfNotHeapNumber(Register object,Register heap_number_map,Register scratch,Label * on_not_heap_number)2683 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2684 Register heap_number_map,
2685 Register scratch,
2686 Label* on_not_heap_number) {
2687 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2688 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2689 cmp(scratch, heap_number_map);
2690 bne(on_not_heap_number);
2691 }
2692
2693
JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)2694 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
2695 Register first, Register second, Register scratch1, Register scratch2,
2696 Label* failure) {
2697 // Test that both first and second are sequential one-byte strings.
2698 // Assume that they are non-smis.
2699 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2700 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2701 lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2702 lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2703
2704 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
2705 scratch2, failure);
2706 }
2707
JumpIfNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)2708 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
2709 Register second,
2710 Register scratch1,
2711 Register scratch2,
2712 Label* failure) {
2713 // Check that neither is a smi.
2714 and_(scratch1, first, second);
2715 JumpIfSmi(scratch1, failure);
2716 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
2717 scratch2, failure);
2718 }
2719
2720
JumpIfNotUniqueNameInstanceType(Register reg,Label * not_unique_name)2721 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2722 Label* not_unique_name) {
2723 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2724 Label succeed;
2725 andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2726 beq(&succeed, cr0);
2727 cmpi(reg, Operand(SYMBOL_TYPE));
2728 bne(not_unique_name);
2729
2730 bind(&succeed);
2731 }
2732
2733
2734 // Allocates a heap number or jumps to the gc_required label if the young
2735 // space is full and a scavenge is needed.
AllocateHeapNumber(Register result,Register scratch1,Register scratch2,Register heap_number_map,Label * gc_required,TaggingMode tagging_mode,MutableMode mode)2736 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
2737 Register scratch2,
2738 Register heap_number_map,
2739 Label* gc_required,
2740 TaggingMode tagging_mode,
2741 MutableMode mode) {
2742 // Allocate an object in the heap for the heap number and tag it as a heap
2743 // object.
2744 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
2745 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
2746
2747 Heap::RootListIndex map_index = mode == MUTABLE
2748 ? Heap::kMutableHeapNumberMapRootIndex
2749 : Heap::kHeapNumberMapRootIndex;
2750 AssertIsRoot(heap_number_map, map_index);
2751
2752 // Store heap number map in the allocated object.
2753 if (tagging_mode == TAG_RESULT) {
2754 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
2755 r0);
2756 } else {
2757 StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
2758 }
2759 }
2760
2761
AllocateHeapNumberWithValue(Register result,DoubleRegister value,Register scratch1,Register scratch2,Register heap_number_map,Label * gc_required)2762 void MacroAssembler::AllocateHeapNumberWithValue(
2763 Register result, DoubleRegister value, Register scratch1, Register scratch2,
2764 Register heap_number_map, Label* gc_required) {
2765 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
2766 stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
2767 }
2768
2769
AllocateJSValue(Register result,Register constructor,Register value,Register scratch1,Register scratch2,Label * gc_required)2770 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
2771 Register value, Register scratch1,
2772 Register scratch2, Label* gc_required) {
2773 DCHECK(!result.is(constructor));
2774 DCHECK(!result.is(scratch1));
2775 DCHECK(!result.is(scratch2));
2776 DCHECK(!result.is(value));
2777
2778 // Allocate JSValue in new space.
2779 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
2780
2781 // Initialize the JSValue.
2782 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
2783 StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
2784 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
2785 StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
2786 StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
2787 StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
2788 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
2789 }
2790
2791
CopyBytes(Register src,Register dst,Register length,Register scratch)2792 void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
2793 Register scratch) {
2794 Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
2795
2796 DCHECK(!scratch.is(r0));
2797
2798 cmpi(length, Operand::Zero());
2799 beq(&done);
2800
2801 // Check src alignment and length to see whether word_loop is possible.
2802 andi(scratch, src, Operand(kPointerSize - 1));
2803 beq(&aligned, cr0);
2804 subfic(scratch, scratch, Operand(kPointerSize * 2));
2805 cmp(length, scratch);
2806 blt(&byte_loop);
2807
2808 // Align src before copying in word size chunks.
2809 subi(scratch, scratch, Operand(kPointerSize));
2810 mtctr(scratch);
2811 bind(&align_loop);
2812 lbz(scratch, MemOperand(src));
2813 addi(src, src, Operand(1));
2814 subi(length, length, Operand(1));
2815 stb(scratch, MemOperand(dst));
2816 addi(dst, dst, Operand(1));
2817 bdnz(&align_loop);
2818
2819 bind(&aligned);
2820
2821 // Copy bytes in word size chunks.
2822 if (emit_debug_code()) {
2823 andi(r0, src, Operand(kPointerSize - 1));
2824 Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
2825 }
2826
2827 ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
2828 cmpi(scratch, Operand::Zero());
2829 beq(&byte_loop);
2830
2831 mtctr(scratch);
2832 bind(&word_loop);
2833 LoadP(scratch, MemOperand(src));
2834 addi(src, src, Operand(kPointerSize));
2835 subi(length, length, Operand(kPointerSize));
2836 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
2837 // Currently false for PPC, but a possible future optimization.
2838 StoreP(scratch, MemOperand(dst));
2839 addi(dst, dst, Operand(kPointerSize));
2840 } else {
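    // Unaligned word stores are not used here; emulate one by storing the
    // loaded word a byte at a time in target byte order, so the destination
    // memory image matches what a single word store would have produced.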
2841 #if V8_TARGET_LITTLE_ENDIAN
2842 stb(scratch, MemOperand(dst, 0));
2843 ShiftRightImm(scratch, scratch, Operand(8));
2844 stb(scratch, MemOperand(dst, 1));
2845 ShiftRightImm(scratch, scratch, Operand(8));
2846 stb(scratch, MemOperand(dst, 2));
2847 ShiftRightImm(scratch, scratch, Operand(8));
2848 stb(scratch, MemOperand(dst, 3));
2849 #if V8_TARGET_ARCH_PPC64
2850 ShiftRightImm(scratch, scratch, Operand(8));
2851 stb(scratch, MemOperand(dst, 4));
2852 ShiftRightImm(scratch, scratch, Operand(8));
2853 stb(scratch, MemOperand(dst, 5));
2854 ShiftRightImm(scratch, scratch, Operand(8));
2855 stb(scratch, MemOperand(dst, 6));
2856 ShiftRightImm(scratch, scratch, Operand(8));
2857 stb(scratch, MemOperand(dst, 7));
2858 #endif
2859 #else
2860 #if V8_TARGET_ARCH_PPC64
2861 stb(scratch, MemOperand(dst, 7));
2862 ShiftRightImm(scratch, scratch, Operand(8));
2863 stb(scratch, MemOperand(dst, 6));
2864 ShiftRightImm(scratch, scratch, Operand(8));
2865 stb(scratch, MemOperand(dst, 5));
2866 ShiftRightImm(scratch, scratch, Operand(8));
2867 stb(scratch, MemOperand(dst, 4));
2868 ShiftRightImm(scratch, scratch, Operand(8));
2869 #endif
2870 stb(scratch, MemOperand(dst, 3));
2871 ShiftRightImm(scratch, scratch, Operand(8));
2872 stb(scratch, MemOperand(dst, 2));
2873 ShiftRightImm(scratch, scratch, Operand(8));
2874 stb(scratch, MemOperand(dst, 1));
2875 ShiftRightImm(scratch, scratch, Operand(8));
2876 stb(scratch, MemOperand(dst, 0));
2877 #endif
2878 addi(dst, dst, Operand(kPointerSize));
2879 }
2880 bdnz(&word_loop);
2881
2882 // Copy the last bytes if any left.
2883 cmpi(length, Operand::Zero());
2884 beq(&done);
2885
2886 bind(&byte_loop);
2887 mtctr(length);
2888 bind(&byte_loop_1);
2889 lbz(scratch, MemOperand(src));
2890 addi(src, src, Operand(1));
2891 stb(scratch, MemOperand(dst));
2892 addi(dst, dst, Operand(1));
2893 bdnz(&byte_loop_1);
2894
2895 bind(&done);
2896 }
2897
2898
InitializeNFieldsWithFiller(Register current_address,Register count,Register filler)2899 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
2900 Register count,
2901 Register filler) {
2902 Label loop;
2903 mtctr(count);
2904 bind(&loop);
2905 StoreP(filler, MemOperand(current_address));
2906 addi(current_address, current_address, Operand(kPointerSize));
2907 bdnz(&loop);
2908 }
2909
InitializeFieldsWithFiller(Register current_address,Register end_address,Register filler)2910 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2911 Register end_address,
2912 Register filler) {
2913 Label done;
2914 sub(r0, end_address, current_address, LeaveOE, SetRC);
2915 beq(&done, cr0);
2916 ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
2917 InitializeNFieldsWithFiller(current_address, r0, filler);
2918 bind(&done);
2919 }
2920
2921
JumpIfBothInstanceTypesAreNotSequentialOneByte(Register first,Register second,Register scratch1,Register scratch2,Label * failure)2922 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2923 Register first, Register second, Register scratch1, Register scratch2,
2924 Label* failure) {
2925 const int kFlatOneByteStringMask =
2926 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2927 const int kFlatOneByteStringTag =
2928 kStringTag | kOneByteStringTag | kSeqStringTag;
2929 andi(scratch1, first, Operand(kFlatOneByteStringMask));
2930 andi(scratch2, second, Operand(kFlatOneByteStringMask));
2931 cmpi(scratch1, Operand(kFlatOneByteStringTag));
2932 bne(failure);
2933 cmpi(scratch2, Operand(kFlatOneByteStringTag));
2934 bne(failure);
2935 }
2936
2937
JumpIfInstanceTypeIsNotSequentialOneByte(Register type,Register scratch,Label * failure)2938 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
2939 Register scratch,
2940 Label* failure) {
2941 const int kFlatOneByteStringMask =
2942 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2943 const int kFlatOneByteStringTag =
2944 kStringTag | kOneByteStringTag | kSeqStringTag;
2945 andi(scratch, type, Operand(kFlatOneByteStringMask));
2946 cmpi(scratch, Operand(kFlatOneByteStringTag));
2947 bne(failure);
2948 }
2949
2950 static const int kRegisterPassedArguments = 8;
2951
2952
CalculateStackPassedWords(int num_reg_arguments,int num_double_arguments)2953 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
2954 int num_double_arguments) {
2955 int stack_passed_words = 0;
2956 if (num_double_arguments > DoubleRegister::kNumRegisters) {
2957 stack_passed_words +=
2958 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
2959 }
2960 // Up to 8 simple arguments are passed in registers r3..r10.
2961 if (num_reg_arguments > kRegisterPassedArguments) {
2962 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2963 }
2964 return stack_passed_words;
2965 }
2966
2967
EmitSeqStringSetCharCheck(Register string,Register index,Register value,uint32_t encoding_mask)2968 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
2969 Register value,
2970 uint32_t encoding_mask) {
2971 Label is_object;
2972 TestIfSmi(string, r0);
2973 Check(ne, kNonObject, cr0);
2974
2975 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
2976 lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
2977
2978 andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
2979 cmpi(ip, Operand(encoding_mask));
2980 Check(eq, kUnexpectedStringType);
2981
2982 // The index is assumed to come in untagged. Tag it to compare with the
2983 // string length without using a temp register; it is restored at the end of
2984 // this function.
2985 #if !V8_TARGET_ARCH_PPC64
2986 Label index_tag_ok, index_tag_bad;
2987 JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
2988 #endif
2989 SmiTag(index, index);
2990 #if !V8_TARGET_ARCH_PPC64
2991 b(&index_tag_ok);
2992 bind(&index_tag_bad);
2993 Abort(kIndexIsTooLarge);
2994 bind(&index_tag_ok);
2995 #endif
2996
2997 LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
2998 cmp(index, ip);
2999 Check(lt, kIndexIsTooLarge);
3000
3001 DCHECK(Smi::FromInt(0) == 0);
3002 cmpi(index, Operand::Zero());
3003 Check(ge, kIndexIsNegative);
3004
3005 SmiUntag(index, index);
3006 }
3007
3008
PrepareCallCFunction(int num_reg_arguments,int num_double_arguments,Register scratch)3009 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3010 int num_double_arguments,
3011 Register scratch) {
3012 int frame_alignment = ActivationFrameAlignment();
3013 int stack_passed_arguments =
3014 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3015 int stack_space = kNumRequiredStackFrameSlots;
3016
3017 if (frame_alignment > kPointerSize) {
3018 // Make stack end at alignment and make room for stack arguments
3019 // -- preserving original value of sp.
3020 mr(scratch, sp);
3021 addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
3022 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3023 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
3024 StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3025 } else {
3026 // Make room for stack arguments
3027 stack_space += stack_passed_arguments;
3028 }
3029
3030 // Allocate frame with required slots to make ABI work.
3031 li(r0, Operand::Zero());
3032 StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
3033 }
3034
3035
PrepareCallCFunction(int num_reg_arguments,Register scratch)3036 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3037 Register scratch) {
3038 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3039 }
3040
3041
MovToFloatParameter(DoubleRegister src)3042 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
3043
3044
MovToFloatResult(DoubleRegister src)3045 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
3046
3047
MovToFloatParameters(DoubleRegister src1,DoubleRegister src2)3048 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3049 DoubleRegister src2) {
3050 if (src2.is(d1)) {
3051 DCHECK(!src1.is(d2));
3052 Move(d2, src2);
3053 Move(d1, src1);
3054 } else {
3055 Move(d1, src1);
3056 Move(d2, src2);
3057 }
3058 }
3059
3060
3061 void MacroAssembler::CallCFunction(ExternalReference function,
3062 int num_reg_arguments,
3063 int num_double_arguments) {
3064 mov(ip, Operand(function));
3065 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3066 }
3067
3068
3069 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
3070 int num_double_arguments) {
3071 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3072 }
3073
3074
3075 void MacroAssembler::CallCFunction(ExternalReference function,
3076 int num_arguments) {
3077 CallCFunction(function, num_arguments, 0);
3078 }
3079
3080
3081 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
3082 CallCFunction(function, num_arguments, 0);
3083 }
3084
3085
3086 void MacroAssembler::CallCFunctionHelper(Register function,
3087 int num_reg_arguments,
3088 int num_double_arguments) {
3089 DCHECK(has_frame());
3090 // Just call directly. The function called cannot cause a GC, or
3091 // allow preemption, so the return address in the link register
3092 // stays correct.
3093 Register dest = function;
3094 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
3095   // AIX uses a function descriptor. When calling C code, load the real
3096   // entry point and the TOC pointer from the descriptor.
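  // As used below, the descriptor holds the real entry point in word 0 and the
  // TOC pointer in word 1 (the AIX/ELFv1 descriptors also carry an environment
  // pointer in a third word, which is not needed here).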
3097 LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
3098 LoadP(ip, MemOperand(function, 0));
3099 dest = ip;
3100 #elif ABI_CALL_VIA_IP
3101 Move(ip, function);
3102 dest = ip;
3103 #endif
3104
3105 Call(dest);
3106
3107   // Remove the frame created by PrepareCallCFunction.
3108 int stack_passed_arguments =
3109 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3110 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
3111 if (ActivationFrameAlignment() > kPointerSize) {
3112 LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
3113 } else {
3114 addi(sp, sp, Operand(stack_space * kPointerSize));
3115 }
3116 }
3117
3118
3119 void MacroAssembler::FlushICache(Register address, size_t size,
3120 Register scratch) {
3121 if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
3122 sync();
3123 icbi(r0, address);
3124 isync();
3125 return;
3126 }
3127
3128 Label done;
3129
3130 dcbf(r0, address);
3131 sync();
3132 icbi(r0, address);
3133 isync();
3134
3135   // This code handles ranges which cross a single cache line boundary.
3136   // scratch is the start of the last cache line that intersects the range.
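  // Example: with 64-byte cache lines, address 0x1038 and size 16, the last
  // byte is 0x1047 and scratch becomes 0x1040; since 0x1040 > 0x1038 the
  // second line is flushed as well. The DCHECK below guarantees the range
  // spans at most two cache lines.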
3137 const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());
3138
3139 DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2));
3140 addi(scratch, address, Operand(size - 1));
3141 ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
3142 cmpl(scratch, address);
3143 ble(&done);
3144
3145 dcbf(r0, scratch);
3146 sync();
3147 icbi(r0, scratch);
3148 isync();
3149
3150 bind(&done);
3151 }
3152
3153
3154 void MacroAssembler::DecodeConstantPoolOffset(Register result,
3155 Register location) {
3156 Label overflow_access, done;
3157 DCHECK(!AreAliased(result, location, r0));
3158
3159 // Determine constant pool access type
3160 // Caller has already placed the instruction word at location in result.
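  // Sketch of the two encodings handled below: a regular access is a single
  // load/store with a 16-bit displacement off the constant pool pointer, so
  // the offset is just the instruction's imm16 field. An overflow access is
  //   addis scratch, pool, hi
  //   <load/store> reg, lo(scratch)
  // and the decoded offset is (hi << 16) + sign_extend(lo).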
3161 ExtractBitRange(r0, result, 31, 26);
3162 cmpi(r0, Operand(ADDIS >> 26));
3163 beq(&overflow_access);
3164
3165 // Regular constant pool access
3166 // extract the load offset
3167 andi(result, result, Operand(kImm16Mask));
3168 b(&done);
3169
3170 bind(&overflow_access);
3171 // Overflow constant pool access
3172 // shift addis immediate
3173 slwi(r0, result, Operand(16));
3174 // sign-extend and add the load offset
3175 lwz(result, MemOperand(location, kInstrSize));
3176 extsh(result, result);
3177 add(result, r0, result);
3178
3179 bind(&done);
3180 }
3181
3182
3183 void MacroAssembler::CheckPageFlag(
3184 Register object,
3185 Register scratch, // scratch may be same register as object
3186 int mask, Condition cc, Label* condition_met) {
3187 DCHECK(cc == ne || cc == eq);
3188 ClearRightImm(scratch, object, Operand(kPageSizeBits));
3189 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3190
3191 And(r0, scratch, Operand(mask), SetRC);
3192
3193 if (cc == ne) {
3194 bne(condition_met, cr0);
3195 }
3196 if (cc == eq) {
3197 beq(condition_met, cr0);
3198 }
3199 }
3200
3201
3202 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3203 Register scratch1, Label* on_black) {
3204 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
3205 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3206 }
3207
3208
3209 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3210 Register mask_scratch, Label* has_color,
3211 int first_bit, int second_bit) {
3212 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3213
3214 GetMarkBits(object, bitmap_scratch, mask_scratch);
3215
3216 Label other_color, word_boundary;
3217 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3218 // Test the first bit
3219 and_(r0, ip, mask_scratch, SetRC);
3220 b(first_bit == 1 ? eq : ne, &other_color, cr0);
3221 // Shift left 1
3222 // May need to load the next cell
3223 slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
3224 beq(&word_boundary, cr0);
3225 // Test the second bit
3226 and_(r0, ip, mask_scratch, SetRC);
3227 b(second_bit == 1 ? ne : eq, has_color, cr0);
3228 b(&other_color);
3229
3230 bind(&word_boundary);
3231 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
3232 andi(r0, ip, Operand(1));
3233 b(second_bit == 1 ? ne : eq, has_color, cr0);
3234 bind(&other_color);
3235 }
3236
3237
3238 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
3239 Register mask_reg) {
3240 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3241 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
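  // Sketch of the address arithmetic below: bitmap_reg <- page start
  // (addr & ~kPageAlignmentMask); mask_reg <- 1 << bit index within the bitmap
  // cell (bits [kPointerSizeLog2, kLowBits) of addr); the cell index
  // (bits [kLowBits, kPageSizeBits) of addr), scaled by
  // 1 << Bitmap::kBytesPerCellLog2, is added to bitmap_reg, and callers read
  // the cell at MemOperand(bitmap_reg, MemoryChunk::kHeaderSize).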
3242 lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
3243 and_(bitmap_reg, addr_reg, r0);
3244 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3245 ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
3246 ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
3247 ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
3248 add(bitmap_reg, bitmap_reg, ip);
3249 li(ip, Operand(1));
3250 slw(mask_reg, ip, mask_reg);
3251 }
3252
3253
3254 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
3255 Register mask_scratch, Register load_scratch,
3256 Label* value_is_white) {
3257 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3258 GetMarkBits(value, bitmap_scratch, mask_scratch);
3259
3260 // If the value is black or grey we don't need to do anything.
3261 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3262 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3263 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
3264 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3265
3266 // Since both black and grey have a 1 in the first position and white does
3267 // not have a 1 there we only need to check one bit.
3268 lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3269 and_(r0, mask_scratch, load_scratch, SetRC);
3270 beq(value_is_white, cr0);
3271 }
3272
3273
3274 // Saturate a value into an 8-bit unsigned integer
3275 // if input_value < 0, output_value is 0
3276 // if input_value > 255, output_value is 255
3277 // otherwise output_value is the input_value
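// For example: -5 -> 0, 300 -> 255, 128 -> 128. The ISELECT path relies on
// the Power ISA convention that an RA operand of r0 in isel reads as the
// constant 0, so isel(lt, output_reg, r0, input_reg) selects 0 when the
// input is negative.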
3278 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3279 int satval = (1 << 8) - 1;
3280
3281 if (CpuFeatures::IsSupported(ISELECT)) {
3282 // set to 0 if negative
3283 cmpi(input_reg, Operand::Zero());
3284 isel(lt, output_reg, r0, input_reg);
3285
3286 // set to satval if > satval
3287 li(r0, Operand(satval));
3288 cmpi(output_reg, Operand(satval));
3289 isel(lt, output_reg, output_reg, r0);
3290 } else {
3291 Label done, negative_label, overflow_label;
3292 cmpi(input_reg, Operand::Zero());
3293 blt(&negative_label);
3294
3295 cmpi(input_reg, Operand(satval));
3296 bgt(&overflow_label);
3297 if (!output_reg.is(input_reg)) {
3298 mr(output_reg, input_reg);
3299 }
3300 b(&done);
3301
3302 bind(&negative_label);
3303 li(output_reg, Operand::Zero()); // set to 0 if negative
3304 b(&done);
3305
3306 bind(&overflow_label); // set to satval if > satval
3307 li(output_reg, Operand(satval));
3308
3309 bind(&done);
3310 }
3311 }
3312
3313
3314 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
3315
3316
3317 void MacroAssembler::ResetRoundingMode() {
3318 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
3319 }
3320
3321
3322 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3323 DoubleRegister input_reg,
3324 DoubleRegister double_scratch) {
3325 Label above_zero;
3326 Label done;
3327 Label in_bounds;
3328
3329 LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3330 fcmpu(input_reg, double_scratch);
3331 bgt(&above_zero);
3332
3333   // Double value is not greater than zero (<= 0 or NaN): return 0.
3334 LoadIntLiteral(result_reg, 0);
3335 b(&done);
3336
3337 // Double value is >= 255, return 255.
3338 bind(&above_zero);
3339 LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3340 fcmpu(input_reg, double_scratch);
3341 ble(&in_bounds);
3342 LoadIntLiteral(result_reg, 255);
3343 b(&done);
3344
3345 // In 0-255 range, round and truncate.
3346 bind(&in_bounds);
3347
3348 // round to nearest (default rounding mode)
3349 fctiw(double_scratch, input_reg);
3350 MovDoubleLowToInt(result_reg, double_scratch);
3351 bind(&done);
3352 }
3353
3354
3355 void MacroAssembler::LoadInstanceDescriptors(Register map,
3356 Register descriptors) {
3357 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3358 }
3359
3360
3361 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3362 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3363 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3364 }
3365
3366
3367 void MacroAssembler::EnumLength(Register dst, Register map) {
3368 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3369 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3370 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
3371 SmiTag(dst);
3372 }
3373
3374
3375 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3376 int accessor_index,
3377 AccessorComponent accessor) {
3378 LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3379 LoadInstanceDescriptors(dst, dst);
3380 LoadP(dst,
3381 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3382 const int getterOffset = AccessorPair::kGetterOffset;
3383 const int setterOffset = AccessorPair::kSetterOffset;
3384 int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
3385 LoadP(dst, FieldMemOperand(dst, offset));
3386 }
3387
3388
3389 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3390 Register empty_fixed_array_value = r9;
3391 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3392 Label next, start;
3393 mr(r5, r3);
3394
3395 // Check if the enum length field is properly initialized, indicating that
3396 // there is an enum cache.
3397 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3398
3399 EnumLength(r6, r4);
3400 CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3401 beq(call_runtime);
3402
3403 b(&start);
3404
3405 bind(&next);
3406 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3407
3408 // For all objects but the receiver, check that the cache is empty.
3409 EnumLength(r6, r4);
3410 CmpSmiLiteral(r6, Smi::FromInt(0), r0);
3411 bne(call_runtime);
3412
3413 bind(&start);
3414
3415 // Check that there are no elements. Register r5 contains the current JS
3416 // object we've reached through the prototype chain.
3417 Label no_elements;
3418 LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
3419 cmp(r5, empty_fixed_array_value);
3420 beq(&no_elements);
3421
3422   // Second chance: the object may be using the empty slow element dictionary.
3423 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
3424 bne(call_runtime);
3425
3426 bind(&no_elements);
3427 LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
3428 cmp(r5, null_value);
3429 bne(&next);
3430 }
3431
3432
3433 ////////////////////////////////////////////////////////////////////////////////
3434 //
3435 // New MacroAssembler Interfaces added for PPC
3436 //
3437 ////////////////////////////////////////////////////////////////////////////////
3438 void MacroAssembler::LoadIntLiteral(Register dst, int value) {
3439 mov(dst, Operand(value));
3440 }
3441
3442
3443 void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
3444 mov(dst, Operand(smi));
3445 }
3446
3447
3448 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
3449 Register scratch) {
3450 if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
3451 !(scratch.is(r0) && ConstantPoolAccessIsInOverflow())) {
3452 ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
3453 if (access == ConstantPoolEntry::OVERFLOWED) {
3454 addis(scratch, kConstantPoolRegister, Operand::Zero());
3455 lfd(result, MemOperand(scratch, 0));
3456 } else {
3457 lfd(result, MemOperand(kConstantPoolRegister, 0));
3458 }
3459 return;
3460 }
3461
3462   // Avoid a gcc strict-aliasing error by using a union cast.
3463 union {
3464 double dval;
3465 #if V8_TARGET_ARCH_PPC64
3466 intptr_t ival;
3467 #else
3468 intptr_t ival[2];
3469 #endif
3470 } litVal;
3471
3472 litVal.dval = value;
3473
3474 #if V8_TARGET_ARCH_PPC64
3475 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3476 mov(scratch, Operand(litVal.ival));
3477 mtfprd(result, scratch);
3478 return;
3479 }
3480 #endif
3481
3482 addi(sp, sp, Operand(-kDoubleSize));
3483 #if V8_TARGET_ARCH_PPC64
3484 mov(scratch, Operand(litVal.ival));
3485 std(scratch, MemOperand(sp));
3486 #else
3487 LoadIntLiteral(scratch, litVal.ival[0]);
3488 stw(scratch, MemOperand(sp, 0));
3489 LoadIntLiteral(scratch, litVal.ival[1]);
3490 stw(scratch, MemOperand(sp, 4));
3491 #endif
3492 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3493 lfd(result, MemOperand(sp, 0));
3494 addi(sp, sp, Operand(kDoubleSize));
3495 }
3496
3497
3498 void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
3499 Register scratch) {
3500 // sign-extend src to 64-bit
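// Two paths: a direct GPR->FPR move when the FPR_GPR_MOV feature is
// available, otherwise the value is spilled to an 8-byte stack slot and
// reloaded with lfd (the group-ending nop between the store and the reload
// is the LHS/RAW mitigation noted below).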
3501 #if V8_TARGET_ARCH_PPC64
3502 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3503 mtfprwa(dst, src);
3504 return;
3505 }
3506 #endif
3507
3508 DCHECK(!src.is(scratch));
3509 subi(sp, sp, Operand(kDoubleSize));
3510 #if V8_TARGET_ARCH_PPC64
3511 extsw(scratch, src);
3512 std(scratch, MemOperand(sp, 0));
3513 #else
3514 srawi(scratch, src, 31);
3515 stw(scratch, MemOperand(sp, Register::kExponentOffset));
3516 stw(src, MemOperand(sp, Register::kMantissaOffset));
3517 #endif
3518 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3519 lfd(dst, MemOperand(sp, 0));
3520 addi(sp, sp, Operand(kDoubleSize));
3521 }
3522
3523
3524 void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
3525 Register scratch) {
3526 // zero-extend src to 64-bit
3527 #if V8_TARGET_ARCH_PPC64
3528 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3529 mtfprwz(dst, src);
3530 return;
3531 }
3532 #endif
3533
3534 DCHECK(!src.is(scratch));
3535 subi(sp, sp, Operand(kDoubleSize));
3536 #if V8_TARGET_ARCH_PPC64
3537 clrldi(scratch, src, Operand(32));
3538 std(scratch, MemOperand(sp, 0));
3539 #else
3540 li(scratch, Operand::Zero());
3541 stw(scratch, MemOperand(sp, Register::kExponentOffset));
3542 stw(src, MemOperand(sp, Register::kMantissaOffset));
3543 #endif
3544 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3545 lfd(dst, MemOperand(sp, 0));
3546 addi(sp, sp, Operand(kDoubleSize));
3547 }
3548
3549
3550 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
3551 #if !V8_TARGET_ARCH_PPC64
3552 Register src_hi,
3553 #endif
3554 Register src) {
3555 #if V8_TARGET_ARCH_PPC64
3556 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3557 mtfprd(dst, src);
3558 return;
3559 }
3560 #endif
3561
3562 subi(sp, sp, Operand(kDoubleSize));
3563 #if V8_TARGET_ARCH_PPC64
3564 std(src, MemOperand(sp, 0));
3565 #else
3566 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
3567 stw(src, MemOperand(sp, Register::kMantissaOffset));
3568 #endif
3569 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3570 lfd(dst, MemOperand(sp, 0));
3571 addi(sp, sp, Operand(kDoubleSize));
3572 }
3573
3574
3575 #if V8_TARGET_ARCH_PPC64
3576 void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
3577 Register src_hi,
3578 Register src_lo,
3579 Register scratch) {
3580 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3581 sldi(scratch, src_hi, Operand(32));
3582 rldimi(scratch, src_lo, 0, 32);
3583 mtfprd(dst, scratch);
3584 return;
3585 }
3586
3587 subi(sp, sp, Operand(kDoubleSize));
3588 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
3589 stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
3590 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3591 lfd(dst, MemOperand(sp));
3592 addi(sp, sp, Operand(kDoubleSize));
3593 }
3594 #endif
3595
3596
3597 void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
3598 Register scratch) {
3599 #if V8_TARGET_ARCH_PPC64
3600 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3601 mffprd(scratch, dst);
3602 rldimi(scratch, src, 0, 32);
3603 mtfprd(dst, scratch);
3604 return;
3605 }
3606 #endif
3607
3608 subi(sp, sp, Operand(kDoubleSize));
3609 stfd(dst, MemOperand(sp));
3610 stw(src, MemOperand(sp, Register::kMantissaOffset));
3611 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3612 lfd(dst, MemOperand(sp));
3613 addi(sp, sp, Operand(kDoubleSize));
3614 }
3615
3616
3617 void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
3618 Register scratch) {
3619 #if V8_TARGET_ARCH_PPC64
3620 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3621 mffprd(scratch, dst);
3622 rldimi(scratch, src, 32, 0);
3623 mtfprd(dst, scratch);
3624 return;
3625 }
3626 #endif
3627
3628 subi(sp, sp, Operand(kDoubleSize));
3629 stfd(dst, MemOperand(sp));
3630 stw(src, MemOperand(sp, Register::kExponentOffset));
3631 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3632 lfd(dst, MemOperand(sp));
3633 addi(sp, sp, Operand(kDoubleSize));
3634 }
3635
3636
3637 void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
3638 #if V8_TARGET_ARCH_PPC64
3639 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3640 mffprwz(dst, src);
3641 return;
3642 }
3643 #endif
3644
3645 subi(sp, sp, Operand(kDoubleSize));
3646 stfd(src, MemOperand(sp));
3647 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3648 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
3649 addi(sp, sp, Operand(kDoubleSize));
3650 }
3651
3652
3653 void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
3654 #if V8_TARGET_ARCH_PPC64
3655 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3656 mffprd(dst, src);
3657 srdi(dst, dst, Operand(32));
3658 return;
3659 }
3660 #endif
3661
3662 subi(sp, sp, Operand(kDoubleSize));
3663 stfd(src, MemOperand(sp));
3664 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3665 lwz(dst, MemOperand(sp, Register::kExponentOffset));
3666 addi(sp, sp, Operand(kDoubleSize));
3667 }
3668
3669
3670 void MacroAssembler::MovDoubleToInt64(
3671 #if !V8_TARGET_ARCH_PPC64
3672 Register dst_hi,
3673 #endif
3674 Register dst, DoubleRegister src) {
3675 #if V8_TARGET_ARCH_PPC64
3676 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3677 mffprd(dst, src);
3678 return;
3679 }
3680 #endif
3681
3682 subi(sp, sp, Operand(kDoubleSize));
3683 stfd(src, MemOperand(sp));
3684 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3685 #if V8_TARGET_ARCH_PPC64
3686 ld(dst, MemOperand(sp, 0));
3687 #else
3688 lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
3689 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
3690 #endif
3691 addi(sp, sp, Operand(kDoubleSize));
3692 }
3693
3694
3695 void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
3696 subi(sp, sp, Operand(kFloatSize));
3697 stw(src, MemOperand(sp, 0));
3698 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3699 lfs(dst, MemOperand(sp, 0));
3700 addi(sp, sp, Operand(kFloatSize));
3701 }
3702
3703
3704 void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
3705 subi(sp, sp, Operand(kFloatSize));
3706 frsp(src, src);
3707 stfs(src, MemOperand(sp, 0));
3708 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3709 lwz(dst, MemOperand(sp, 0));
3710 addi(sp, sp, Operand(kFloatSize));
3711 }
3712
3713
3714 void MacroAssembler::Add(Register dst, Register src, intptr_t value,
3715 Register scratch) {
3716 if (is_int16(value)) {
3717 addi(dst, src, Operand(value));
3718 } else {
3719 mov(scratch, Operand(value));
3720 add(dst, src, scratch);
3721 }
3722 }
3723
3724
3725 void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
3726 CRegister cr) {
3727 intptr_t value = src2.immediate();
3728 if (is_int16(value)) {
3729 cmpi(src1, src2, cr);
3730 } else {
3731 mov(scratch, src2);
3732 cmp(src1, scratch, cr);
3733 }
3734 }
3735
3736
3737 void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
3738 CRegister cr) {
3739 intptr_t value = src2.immediate();
3740 if (is_uint16(value)) {
3741 cmpli(src1, src2, cr);
3742 } else {
3743 mov(scratch, src2);
3744 cmpl(src1, scratch, cr);
3745 }
3746 }
3747
3748
3749 void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
3750 CRegister cr) {
3751 intptr_t value = src2.immediate();
3752 if (is_int16(value)) {
3753 cmpwi(src1, src2, cr);
3754 } else {
3755 mov(scratch, src2);
3756 cmpw(src1, scratch, cr);
3757 }
3758 }
3759
3760
3761 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
3762 Register scratch, CRegister cr) {
3763 intptr_t value = src2.immediate();
3764 if (is_uint16(value)) {
3765 cmplwi(src1, src2, cr);
3766 } else {
3767 mov(scratch, src2);
3768 cmplw(src1, scratch, cr);
3769 }
3770 }
3771
3772
3773 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
3774 RCBit rc) {
3775 if (rb.is_reg()) {
3776 and_(ra, rs, rb.rm(), rc);
3777 } else {
3778 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
3779 andi(ra, rs, rb);
3780 } else {
3781 // mov handles the relocation.
3782 DCHECK(!rs.is(r0));
3783 mov(r0, rb);
3784 and_(ra, rs, r0, rc);
3785 }
3786 }
3787 }
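// Note: the immediate form andi. always sets CR0, which is why And() takes the
// immediate path only when rc == SetRC; ori/xori below never set CR0, so Or()
// and Xor() take it only when rc == LeaveRC.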
3788
3789
3790 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
3791 if (rb.is_reg()) {
3792 orx(ra, rs, rb.rm(), rc);
3793 } else {
3794 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
3795 ori(ra, rs, rb);
3796 } else {
3797 // mov handles the relocation.
3798 DCHECK(!rs.is(r0));
3799 mov(r0, rb);
3800 orx(ra, rs, r0, rc);
3801 }
3802 }
3803 }
3804
3805
3806 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
3807 RCBit rc) {
3808 if (rb.is_reg()) {
3809 xor_(ra, rs, rb.rm(), rc);
3810 } else {
3811 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
3812 xori(ra, rs, rb);
3813 } else {
3814 // mov handles the relocation.
3815 DCHECK(!rs.is(r0));
3816 mov(r0, rb);
3817 xor_(ra, rs, r0, rc);
3818 }
3819 }
3820 }
3821
3822
3823 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
3824 CRegister cr) {
3825 #if V8_TARGET_ARCH_PPC64
3826 LoadSmiLiteral(scratch, smi);
3827 cmp(src1, scratch, cr);
3828 #else
3829 Cmpi(src1, Operand(smi), scratch, cr);
3830 #endif
3831 }
3832
3833
3834 void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
3835 CRegister cr) {
3836 #if V8_TARGET_ARCH_PPC64
3837 LoadSmiLiteral(scratch, smi);
3838 cmpl(src1, scratch, cr);
3839 #else
3840 Cmpli(src1, Operand(smi), scratch, cr);
3841 #endif
3842 }
3843
3844
3845 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
3846 Register scratch) {
3847 #if V8_TARGET_ARCH_PPC64
3848 LoadSmiLiteral(scratch, smi);
3849 add(dst, src, scratch);
3850 #else
3851 Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
3852 #endif
3853 }
3854
3855
3856 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
3857 Register scratch) {
3858 #if V8_TARGET_ARCH_PPC64
3859 LoadSmiLiteral(scratch, smi);
3860 sub(dst, src, scratch);
3861 #else
3862 Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
3863 #endif
3864 }
3865
3866
3867 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
3868 Register scratch, RCBit rc) {
3869 #if V8_TARGET_ARCH_PPC64
3870 LoadSmiLiteral(scratch, smi);
3871 and_(dst, src, scratch, rc);
3872 #else
3873 And(dst, src, Operand(smi), rc);
3874 #endif
3875 }
3876
3877
3878 // Load a "pointer" sized value from the memory location
3879 void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
3880 Register scratch) {
3881 int offset = mem.offset();
3882
3883 if (!is_int16(offset)) {
3884 /* cannot use d-form */
3885 DCHECK(!scratch.is(no_reg));
3886 mov(scratch, Operand(offset));
3887 #if V8_TARGET_ARCH_PPC64
3888 ldx(dst, MemOperand(mem.ra(), scratch));
3889 #else
3890 lwzx(dst, MemOperand(mem.ra(), scratch));
3891 #endif
3892 } else {
3893 #if V8_TARGET_ARCH_PPC64
3894 int misaligned = (offset & 3);
3895 if (misaligned) {
3896 // adjust base to conform to offset alignment requirements
3897 // Todo: enhance to use scratch if dst is unsuitable
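      // Example: offset == 6 is not a multiple of 4 (ld needs a DS-form
      // displacement), so the base is adjusted by (6 & 3) - 4 == -2 and the
      // load uses displacement (6 & ~3) + 4 == 8; -2 + 8 == 6 and 8 is
      // 4-byte aligned.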
3898 DCHECK(!dst.is(r0));
3899 addi(dst, mem.ra(), Operand((offset & 3) - 4));
3900 ld(dst, MemOperand(dst, (offset & ~3) + 4));
3901 } else {
3902 ld(dst, mem);
3903 }
3904 #else
3905 lwz(dst, mem);
3906 #endif
3907 }
3908 }
3909
3910
3911 // Store a "pointer" sized value to the memory location
3912 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
3913 Register scratch) {
3914 int offset = mem.offset();
3915
3916 if (!is_int16(offset)) {
3917 /* cannot use d-form */
3918 DCHECK(!scratch.is(no_reg));
3919 mov(scratch, Operand(offset));
3920 #if V8_TARGET_ARCH_PPC64
3921 stdx(src, MemOperand(mem.ra(), scratch));
3922 #else
3923 stwx(src, MemOperand(mem.ra(), scratch));
3924 #endif
3925 } else {
3926 #if V8_TARGET_ARCH_PPC64
3927 int misaligned = (offset & 3);
3928 if (misaligned) {
3929 // adjust base to conform to offset alignment requirements
3930 // a suitable scratch is required here
3931 DCHECK(!scratch.is(no_reg));
3932 if (scratch.is(r0)) {
3933 LoadIntLiteral(scratch, offset);
3934 stdx(src, MemOperand(mem.ra(), scratch));
3935 } else {
3936 addi(scratch, mem.ra(), Operand((offset & 3) - 4));
3937 std(src, MemOperand(scratch, (offset & ~3) + 4));
3938 }
3939 } else {
3940 std(src, mem);
3941 }
3942 #else
3943 stw(src, mem);
3944 #endif
3945 }
3946 }
3947
3948 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
3949 Register scratch) {
3950 int offset = mem.offset();
3951
3952 if (!is_int16(offset)) {
3953 DCHECK(!scratch.is(no_reg));
3954 mov(scratch, Operand(offset));
3955 lwax(dst, MemOperand(mem.ra(), scratch));
3956 } else {
3957 #if V8_TARGET_ARCH_PPC64
3958 int misaligned = (offset & 3);
3959 if (misaligned) {
3960 // adjust base to conform to offset alignment requirements
3961 // Todo: enhance to use scratch if dst is unsuitable
3962 DCHECK(!dst.is(r0));
3963 addi(dst, mem.ra(), Operand((offset & 3) - 4));
3964 lwa(dst, MemOperand(dst, (offset & ~3) + 4));
3965 } else {
3966 lwa(dst, mem);
3967 }
3968 #else
3969 lwz(dst, mem);
3970 #endif
3971 }
3972 }
3973
3974
3975 // Variable length depending on whether offset fits into immediate field
3976 // MemOperand currently only supports d-form
3977 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
3978 Register scratch) {
3979 Register base = mem.ra();
3980 int offset = mem.offset();
3981
3982 if (!is_int16(offset)) {
3983 LoadIntLiteral(scratch, offset);
3984 lwzx(dst, MemOperand(base, scratch));
3985 } else {
3986 lwz(dst, mem);
3987 }
3988 }
3989
3990
3991 // Variable length depending on whether offset fits into immediate field
3992 // MemOperand currently only supports d-form
3993 void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
3994 Register scratch) {
3995 Register base = mem.ra();
3996 int offset = mem.offset();
3997
3998 if (!is_int16(offset)) {
3999 LoadIntLiteral(scratch, offset);
4000 stwx(src, MemOperand(base, scratch));
4001 } else {
4002 stw(src, mem);
4003 }
4004 }
4005
4006
4007 void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
4008 Register scratch) {
4009 int offset = mem.offset();
4010
4011 if (!is_int16(offset)) {
4012 DCHECK(!scratch.is(no_reg));
4013 mov(scratch, Operand(offset));
4014 lhax(dst, MemOperand(mem.ra(), scratch));
4015 } else {
4016 lha(dst, mem);
4017 }
4018 }
4019
4020
4021 // Variable length depending on whether offset fits into immediate field
4022 // MemOperand currently only supports d-form
4023 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
4024 Register scratch) {
4025 Register base = mem.ra();
4026 int offset = mem.offset();
4027
4028 if (!is_int16(offset)) {
4029 LoadIntLiteral(scratch, offset);
4030 lhzx(dst, MemOperand(base, scratch));
4031 } else {
4032 lhz(dst, mem);
4033 }
4034 }
4035
4036
4037 // Variable length depending on whether offset fits into immediate field
4038 // MemOperand currently only supports d-form
4039 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
4040 Register scratch) {
4041 Register base = mem.ra();
4042 int offset = mem.offset();
4043
4044 if (!is_int16(offset)) {
4045 LoadIntLiteral(scratch, offset);
4046 sthx(src, MemOperand(base, scratch));
4047 } else {
4048 sth(src, mem);
4049 }
4050 }
4051
4052
4053 // Variable length depending on whether offset fits into immediate field
4054 // MemOperand currently only supports d-form
4055 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
4056 Register scratch) {
4057 Register base = mem.ra();
4058 int offset = mem.offset();
4059
4060 if (!is_int16(offset)) {
4061 LoadIntLiteral(scratch, offset);
4062 lbzx(dst, MemOperand(base, scratch));
4063 } else {
4064 lbz(dst, mem);
4065 }
4066 }
4067
4068
4069 // Variable length depending on whether offset fits into immediate field
4070 // MemOperand currently only supports d-form
4071 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
4072 Register scratch) {
4073 Register base = mem.ra();
4074 int offset = mem.offset();
4075
4076 if (!is_int16(offset)) {
4077 LoadIntLiteral(scratch, offset);
4078 stbx(src, MemOperand(base, scratch));
4079 } else {
4080 stb(src, mem);
4081 }
4082 }
4083
4084
4085 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
4086 Representation r, Register scratch) {
4087 DCHECK(!r.IsDouble());
4088 if (r.IsInteger8()) {
4089 LoadByte(dst, mem, scratch);
4090 extsb(dst, dst);
4091 } else if (r.IsUInteger8()) {
4092 LoadByte(dst, mem, scratch);
4093 } else if (r.IsInteger16()) {
4094 LoadHalfWordArith(dst, mem, scratch);
4095 } else if (r.IsUInteger16()) {
4096 LoadHalfWord(dst, mem, scratch);
4097 #if V8_TARGET_ARCH_PPC64
4098 } else if (r.IsInteger32()) {
4099 LoadWordArith(dst, mem, scratch);
4100 #endif
4101 } else {
4102 LoadP(dst, mem, scratch);
4103 }
4104 }
4105
4106
4107 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
4108 Representation r, Register scratch) {
4109 DCHECK(!r.IsDouble());
4110 if (r.IsInteger8() || r.IsUInteger8()) {
4111 StoreByte(src, mem, scratch);
4112 } else if (r.IsInteger16() || r.IsUInteger16()) {
4113 StoreHalfWord(src, mem, scratch);
4114 #if V8_TARGET_ARCH_PPC64
4115 } else if (r.IsInteger32()) {
4116 StoreWord(src, mem, scratch);
4117 #endif
4118 } else {
4119 if (r.IsHeapObject()) {
4120 AssertNotSmi(src);
4121 } else if (r.IsSmi()) {
4122 AssertSmi(src);
4123 }
4124 StoreP(src, mem, scratch);
4125 }
4126 }
4127
4128
4129 void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
4130 Register scratch) {
4131 Register base = mem.ra();
4132 int offset = mem.offset();
4133
4134 if (!is_int16(offset)) {
4135 mov(scratch, Operand(offset));
4136 lfdx(dst, MemOperand(base, scratch));
4137 } else {
4138 lfd(dst, mem);
4139 }
4140 }
4141
4142
4143 void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
4144 Register scratch) {
4145 Register base = mem.ra();
4146 int offset = mem.offset();
4147
4148 if (!is_int16(offset)) {
4149 mov(scratch, Operand(offset));
4150 stfdx(src, MemOperand(base, scratch));
4151 } else {
4152 stfd(src, mem);
4153 }
4154 }
4155
4156
4157 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
4158 Register scratch_reg,
4159 Label* no_memento_found) {
4160 ExternalReference new_space_start =
4161 ExternalReference::new_space_start(isolate());
4162 ExternalReference new_space_allocation_top =
4163 ExternalReference::new_space_allocation_top_address(isolate());
4164 addi(scratch_reg, receiver_reg,
4165 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
4166 Cmpi(scratch_reg, Operand(new_space_start), r0);
4167 blt(no_memento_found);
4168 mov(ip, Operand(new_space_allocation_top));
4169 LoadP(ip, MemOperand(ip));
4170 cmp(scratch_reg, ip);
4171 bgt(no_memento_found);
4172 LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
4173 Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
4174 r0);
4175 }
4176
4177
4178 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
4179 Register reg4, Register reg5,
4180 Register reg6) {
4181 RegList regs = 0;
4182 if (reg1.is_valid()) regs |= reg1.bit();
4183 if (reg2.is_valid()) regs |= reg2.bit();
4184 if (reg3.is_valid()) regs |= reg3.bit();
4185 if (reg4.is_valid()) regs |= reg4.bit();
4186 if (reg5.is_valid()) regs |= reg5.bit();
4187 if (reg6.is_valid()) regs |= reg6.bit();
4188
4189 const RegisterConfiguration* config =
4190 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
4191 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
4192 int code = config->GetAllocatableGeneralCode(i);
4193 Register candidate = Register::from_code(code);
4194 if (regs & candidate.bit()) continue;
4195 return candidate;
4196 }
4197 UNREACHABLE();
4198 return no_reg;
4199 }
4200
4201
4202 void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
4203 Register scratch0,
4204 Register scratch1,
4205 Label* found) {
4206 DCHECK(!scratch1.is(scratch0));
4207 Register current = scratch0;
4208 Label loop_again, end;
4209
4210   // current (scratch0) walks the map/prototype chain, starting from object.
4211 mr(current, object);
4212 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
4213 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
4214 CompareRoot(current, Heap::kNullValueRootIndex);
4215 beq(&end);
4216
4217 // Loop based on the map going up the prototype chain.
4218 bind(&loop_again);
4219 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
4220
4221 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
4222 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
4223 lbz(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
4224 cmpi(scratch1, Operand(JS_OBJECT_TYPE));
4225 blt(found);
4226
4227 lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4228 DecodeField<Map::ElementsKindBits>(scratch1);
4229 cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
4230 beq(found);
4231 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
4232 CompareRoot(current, Heap::kNullValueRootIndex);
4233 bne(&loop_again);
4234
4235 bind(&end);
4236 }
4237
4238
4239 #ifdef DEBUG
4240 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
4241 Register reg5, Register reg6, Register reg7, Register reg8,
4242 Register reg9, Register reg10) {
4243 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
4244 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4245 reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
4246 reg10.is_valid();
4247
4248 RegList regs = 0;
4249 if (reg1.is_valid()) regs |= reg1.bit();
4250 if (reg2.is_valid()) regs |= reg2.bit();
4251 if (reg3.is_valid()) regs |= reg3.bit();
4252 if (reg4.is_valid()) regs |= reg4.bit();
4253 if (reg5.is_valid()) regs |= reg5.bit();
4254 if (reg6.is_valid()) regs |= reg6.bit();
4255 if (reg7.is_valid()) regs |= reg7.bit();
4256 if (reg8.is_valid()) regs |= reg8.bit();
4257 if (reg9.is_valid()) regs |= reg9.bit();
4258 if (reg10.is_valid()) regs |= reg10.bit();
4259 int n_of_non_aliasing_regs = NumRegs(regs);
4260
4261 return n_of_valid_regs != n_of_non_aliasing_regs;
4262 }
4263 #endif
4264
4265
4266 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
4267 FlushICache flush_cache)
4268 : address_(address),
4269 size_(instructions * Assembler::kInstrSize),
4270 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
4271 flush_cache_(flush_cache) {
4272 // Create a new macro assembler pointing to the address of the code to patch.
4273   // The size is adjusted with kGap in order for the assembler to generate
4274   // size bytes of instructions without failing buffer size constraints.
4275 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4276 }
4277
4278
4279 CodePatcher::~CodePatcher() {
4280 // Indicate that code has changed.
4281 if (flush_cache_ == FLUSH) {
4282 Assembler::FlushICache(masm_.isolate(), address_, size_);
4283 }
4284
4285 // Check that the code was patched as expected.
4286 DCHECK(masm_.pc_ == address_ + size_);
4287 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4288 }
4289
4290
4291 void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
4292
4293
4294 void CodePatcher::EmitCondition(Condition cond) {
4295 Instr instr = Assembler::instr_at(masm_.pc_);
4296 switch (cond) {
4297 case eq:
4298 instr = (instr & ~kCondMask) | BT;
4299 break;
4300 case ne:
4301 instr = (instr & ~kCondMask) | BF;
4302 break;
4303 default:
4304 UNIMPLEMENTED();
4305 }
4306 masm_.emit(instr);
4307 }
4308
4309
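// TruncatingDiv computes dividend / divisor (truncated toward zero) by
// multiplying with a precomputed "magic" reciprocal instead of issuing a
// divide. Illustrative values (from base::SignedDivisionByConstant): for
// divisor == 3 the multiplier is 0x55555556 with shift 0, so
//   dividend =  7: mulhw ->  2, sign bit 0, result =  2
//   dividend = -7: mulhw -> -3, sign bit 1, result = -2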
4310 void MacroAssembler::TruncatingDiv(Register result, Register dividend,
4311 int32_t divisor) {
4312 DCHECK(!dividend.is(result));
4313 DCHECK(!dividend.is(r0));
4314 DCHECK(!result.is(r0));
4315 base::MagicNumbersForDivision<uint32_t> mag =
4316 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
4317 mov(r0, Operand(mag.multiplier));
4318 mulhw(result, dividend, r0);
4319 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
4320 if (divisor > 0 && neg) {
4321 add(result, result, dividend);
4322 }
4323 if (divisor < 0 && !neg && mag.multiplier > 0) {
4324 sub(result, result, dividend);
4325 }
4326 if (mag.shift > 0) srawi(result, result, mag.shift);
4327 ExtractBit(r0, dividend, 31);
4328 add(result, result, r0);
4329 }
4330
4331 } // namespace internal
4332 } // namespace v8
4333
4334 #endif // V8_TARGET_ARCH_PPC
4335