1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
6 #define V8_PPC_MACRO_ASSEMBLER_PPC_H_
7
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
12
13 namespace v8 {
14 namespace internal {
15
16 // Give alias names to registers for calling conventions.
17 const Register kReturnRegister0 = {Register::kCode_r3};
18 const Register kReturnRegister1 = {Register::kCode_r4};
19 const Register kReturnRegister2 = {Register::kCode_r5};
20 const Register kJSFunctionRegister = {Register::kCode_r4};
21 const Register kContextRegister = {Register::kCode_r30};
22 const Register kAllocateSizeRegister = {Register::kCode_r4};
23 const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
24 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
25 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
26 const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
27 const Register kJavaScriptCallArgCountRegister = {Register::kCode_r3};
28 const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r6};
29 const Register kRuntimeCallFunctionRegister = {Register::kCode_r4};
30 const Register kRuntimeCallArgCountRegister = {Register::kCode_r3};
31
32 // ----------------------------------------------------------------------------
33 // Static helper functions
34
35 // Generate a MemOperand for loading a field from an object.
FieldMemOperand(Register object,int offset)36 inline MemOperand FieldMemOperand(Register object, int offset) {
37 return MemOperand(object, offset - kHeapObjectTag);
38 }
39
40
// Flags used for AllocateHeapNumber.
enum TaggingMode {
  // Tag the result as a heap object pointer.
  TAG_RESULT,
  // Leave the result untagged.
  DONT_TAG_RESULT
};


// Whether a write barrier should also update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a write barrier should inline a smi check on the stored value.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Whether the stored-to location may itself need a pointers-to-here check.
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
// Whether the caller has already saved the link register.
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
57
58
// Returns a register guaranteed to be distinct from every register passed
// in (up to six; unused slots default to no_reg).
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);


#ifdef DEBUG
// Debug-only helper (used in DCHECKs): returns true if any two of the
// supplied registers alias each other.
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                Register reg4 = no_reg, Register reg5 = no_reg,
                Register reg6 = no_reg, Register reg7 = no_reg,
                Register reg8 = no_reg, Register reg9 = no_reg,
                Register reg10 = no_reg);
#endif
73
// These exist to provide portability between 32 and 64bit.
// Each alias maps to the pointer-width instruction mnemonic for the target,
// so generic code can load/store/shift pointer-sized values without
// sprinkling #ifdefs at every use site.
#if V8_TARGET_ARCH_PPC64
// 64-bit (doubleword) forms.
#define LoadPX ldx
#define LoadPUX ldux
#define StorePX stdx
#define StorePUX stdux
#define ShiftLeftImm sldi
#define ShiftRightImm srdi
#define ClearLeftImm clrldi
#define ClearRightImm clrrdi
#define ShiftRightArithImm sradi
#define ShiftLeft_ sld
#define ShiftRight_ srd
#define ShiftRightArith srad
#define Mul mulld
#define Div divd
#else
// 32-bit (word) forms.
#define LoadPX lwzx
#define LoadPUX lwzux
#define StorePX stwx
#define StorePUX stwux
#define ShiftLeftImm slwi
#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
#define ClearRightImm clrrwi
#define ShiftRightArithImm srawi
#define ShiftLeft_ slw
#define ShiftRight_ srw
#define ShiftRightArith sraw
#define Mul mullw
#define Div divw
#endif
106
107
108 // MacroAssembler implements a collection of frequently used macros.
109 class MacroAssembler : public Assembler {
110 public:
// |buffer|/|size| describe the code buffer to assemble into;
// |create_code_object| controls whether a code object handle is created.
MacroAssembler(Isolate* isolate, void* buffer, int size,
               CodeObjectRequired create_code_object);


// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
static int CallSizeNotPredictableCodeSize(Address target,
                                          RelocInfo::Mode rmode,
                                          Condition cond = al);

// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
void JumpToJSEntry(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
          CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Register target);
void CallJSEntry(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
             TypeFeedbackId ast_id = TypeFeedbackId::None(),
             Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
          TypeFeedbackId ast_id = TypeFeedbackId::None(),
          Condition cond = al);
// Return to the caller (branch to the link register).
void Ret() { blr(); }
// Conditional return: branch to LR when |cond| holds on condition
// register |cr|.
void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }

// Emit code that loads |parameter_index|'th parameter from the stack to
// the register according to the CallInterfaceDescriptor definition.
// |sp_to_ra_offset_in_words| specifies the number of words pushed
// below the caller's sp.
template <class Descriptor>
void LoadParameterFromStack(
    Register reg, typename Descriptor::ParameterIndices parameter_index,
    int sp_to_ra_offset_in_words = 0) {
  DCHECK(Descriptor::kPassLastArgsOnStack);
  // Not implemented for PPC; fail loudly if ever instantiated.
  UNIMPLEMENTED();
}
154
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count);
void Drop(Register count, Register scratch = r0);

// Drop |drop| stack slots, then return via the link register.
void Ret(int drop) {
  Drop(drop);
  blr();
}

// Call a target identified by a label within this code object.
void Call(Label* target);

// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);

// Push/pop the GP registers whose bits are set in |regs|.
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);

// Push/pop the FP registers whose bits are set in |dregs|.
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);

// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
              Condition cond = al);
// Store an object to the root table.
void StoreRoot(Register source, Heap::RootListIndex index,
               Condition cond = al);
185
// ---------------------------------------------------------------------------
// GC Support

// Slow-path record-write helper used by incremental marking.
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
                                         Register address);

// Whether RememberedSetHelper returns or falls through when done.
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };

// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object,  // Used for debug code.
                         Register addr, Register scratch,
                         SaveFPRegsMode save_fp,
                         RememberedSetFinalAction and_then);

// Test page flags of |object|'s page against |mask| and branch to
// |condition_met| when the comparison satisfies |cc|.
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                   Label* condition_met);

// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
  InNewSpace(object, scratch, eq, branch);
}

// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
  InNewSpace(object, scratch, ne, branch);
}

// Check if an object has a given incremental marking color.
void HasColor(Register object, Register scratch0, Register scratch1,
              Label* has_color, int first_bit, int second_bit);

// Jump to |on_black| if |object| is marked black.
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
                 Label* on_black);

// Checks the color of an object. If the object is white we jump to the
// incremental marker.
void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                 Register scratch3, Label* value_is_white);

// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
    Register object, int offset, Register value, Register scratch,
    LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
    SmiCheck smi_check = INLINE_SMI_CHECK,
    PointersToHereCheck pointers_to_here_check_for_value =
        kPointersToHereMaybeInteresting);

// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
inline void RecordWriteContextSlot(
    Register context, int offset, Register value, Register scratch,
    LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
    SmiCheck smi_check = INLINE_SMI_CHECK,
    PointersToHereCheck pointers_to_here_check_for_value =
        kPointersToHereMaybeInteresting) {
  // Re-add the tag so RecordWriteField receives an untagged field offset.
  RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
                   lr_status, save_fp, remembered_set_action, smi_check,
                   pointers_to_here_check_for_value);
}
255
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
                               Register scratch);

// Write barrier for a map-field update; |dst| is clobbered as scratch.
void RecordWriteForMap(Register object, Register map, Register dst,
                       LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);

// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
    Register object, Register address, Register value,
    LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
    SmiCheck smi_check = INLINE_SMI_CHECK,
    PointersToHereCheck pointers_to_here_check_for_value =
        kPointersToHereMaybeInteresting);
274
// Push a single register onto the stack.
void Push(Register src) { push(src); }

// Push a handle.
void Push(Handle<Object> handle);
// Push a smi by wrapping it in a handle.
void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

// Push two registers. Pushes leftmost register first (to highest address).
// The StorePU pre-decrements sp once for all slots; the remaining stores
// fill the slots above the new sp.
void Push(Register src1, Register src2) {
  StorePU(src2, MemOperand(sp, -2 * kPointerSize));
  StoreP(src1, MemOperand(sp, kPointerSize));
}

// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
  StorePU(src3, MemOperand(sp, -3 * kPointerSize));
  StoreP(src2, MemOperand(sp, kPointerSize));
  StoreP(src1, MemOperand(sp, 2 * kPointerSize));
}

// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
  StorePU(src4, MemOperand(sp, -4 * kPointerSize));
  StoreP(src3, MemOperand(sp, kPointerSize));
  StoreP(src2, MemOperand(sp, 2 * kPointerSize));
  StoreP(src1, MemOperand(sp, 3 * kPointerSize));
}

// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
          Register src5) {
  StorePU(src5, MemOperand(sp, -5 * kPointerSize));
  StoreP(src4, MemOperand(sp, kPointerSize));
  StoreP(src3, MemOperand(sp, 2 * kPointerSize));
  StoreP(src2, MemOperand(sp, 3 * kPointerSize));
  StoreP(src1, MemOperand(sp, 4 * kPointerSize));
}

// Pop a single register from the stack.
void Pop(Register dst) { pop(dst); }

// Pop two registers. Pops rightmost register first (from lower address).
// All loads complete before sp is adjusted in one addi.
void Pop(Register src1, Register src2) {
  LoadP(src2, MemOperand(sp, 0));
  LoadP(src1, MemOperand(sp, kPointerSize));
  addi(sp, sp, Operand(2 * kPointerSize));
}

// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
  LoadP(src3, MemOperand(sp, 0));
  LoadP(src2, MemOperand(sp, kPointerSize));
  LoadP(src1, MemOperand(sp, 2 * kPointerSize));
  addi(sp, sp, Operand(3 * kPointerSize));
}

// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4) {
  LoadP(src4, MemOperand(sp, 0));
  LoadP(src3, MemOperand(sp, kPointerSize));
  LoadP(src2, MemOperand(sp, 2 * kPointerSize));
  LoadP(src1, MemOperand(sp, 3 * kPointerSize));
  addi(sp, sp, Operand(4 * kPointerSize));
}

// Pop five registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4,
         Register src5) {
  LoadP(src5, MemOperand(sp, 0));
  LoadP(src4, MemOperand(sp, kPointerSize));
  LoadP(src3, MemOperand(sp, 2 * kPointerSize));
  LoadP(src2, MemOperand(sp, 3 * kPointerSize));
  LoadP(src1, MemOperand(sp, 4 * kPointerSize));
  addi(sp, sp, Operand(5 * kPointerSize));
}
348
// Push a fixed frame, consisting of lr, fp, constant pool.
void PushCommonFrame(Register marker_reg = no_reg);

// Push a standard frame, consisting of lr, fp, constant pool,
// context and JS function.
void PushStandardFrame(Register function_reg);

// Tear down the frame pushed by PushCommonFrame.
void PopCommonFrame(Register marker_reg = no_reg);

// Restore caller's frame pointer and return address prior to being
// overwritten by tail call stack preparation.
void RestoreFrameStateForTailCall();

// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
// Store value in register src in the safepoint stack slot for
// register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);

// Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
// from C.
// Does not handle errors.
void FlushICache(Register address, size_t size, Register scratch);

// If the value is a NaN, canonicalize the value else, do nothing.
void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
// In-place variant: canonicalizes |value| into itself.
void CanonicalizeNaN(const DoubleRegister value) {
  CanonicalizeNaN(value, value);
}

// Converts the integer (untagged smi) in |src| to a double, storing
// the result to |dst|.
void ConvertIntToDouble(Register src, DoubleRegister dst);

// Converts the unsigned integer (untagged smi) in |src| to
// a double, storing the result to |dst|.
void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);

// Converts the integer (untagged smi) in |src| to
// a float, storing the result in |dst|.
void ConvertIntToFloat(Register src, DoubleRegister dst);

// Converts the unsigned integer (untagged smi) in |src| to
// a float, storing the result in |dst|.
void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
399
#if V8_TARGET_ARCH_PPC64
// 64-bit integer to floating-point conversions (PPC64 only).
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif

// Converts the double_input to an integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
// On 32-bit targets the high word of the result goes to |dst_hi|.
void ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                          const Register dst_hi,
#endif
                          const Register dst, const DoubleRegister double_dst,
                          FPRoundingMode rounding_mode = kRoundToZero);

#if V8_TARGET_ARCH_PPC64
// Converts the double_input to an unsigned integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertDoubleToUnsignedInt64(
    const DoubleRegister double_input, const Register dst,
    const DoubleRegister double_dst,
    FPRoundingMode rounding_mode = kRoundToZero);
#endif

#if !V8_TARGET_ARCH_PPC64
// 64-bit shifts on (low, high) register pairs for 32-bit targets.
// The 'Alg' (algebraic) variants are the sign-propagating right shifts.
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                   Register src_high, Register scratch, Register shift);
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                   Register src_high, uint32_t shift);
void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                    Register src_high, Register scratch, Register shift);
void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                    Register src_high, uint32_t shift);
void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                       Register src_high, Register scratch, Register shift);
void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                       Register src_high, uint32_t shift);
#endif
439
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type, Register base = no_reg,
                  int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);

// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
void EnterExitFrame(bool save_doubles, int stack_space = 1,
                    StackFrame::Type frame_type = StackFrame::EXIT);

// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
                    bool restore_context,
                    bool argument_count_is_length = false);

// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();

// Load into |dst| the context |context_chain_length| levels up from the
// current context.
void LoadContext(Register dst, int context_chain_length);

// Load the global object from the current context.
void LoadGlobalObject(Register dst) {
  LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
}

// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
  LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}

// Load slot |index| of the native context into |dst|.
void LoadNativeContextSlot(int index, Register dst);

// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
void LoadGlobalFunctionInitialMap(Register function, Register map,
                                  Register scratch);

// Point kRootRegister at the isolate's roots array so root-relative
// accesses (LoadRoot/StoreRoot) work.
void InitializeRootRegister() {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(kRootRegister, Operand(roots_array_start));
}
485
// ----------------------------------------------------------------
// new PPC macro-assembler interfaces that are slightly higher level
// than assembler-ppc and may generate variable length sequences

// load a literal signed int value <value> to GPR <dst>
void LoadIntLiteral(Register dst, int value);

// load an SMI value <value> to GPR <dst>
void LoadSmiLiteral(Register dst, Smi* smi);

// load a literal double value <value> to FPR <result>
void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);

// Width-specific memory accessors. The 'Arith' variants load with sign
// extension (arithmetic semantics); the plain variants load zero-extended.
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
void LoadWordArith(Register dst, const MemOperand& mem,
                   Register scratch = no_reg);
void StoreWord(Register src, const MemOperand& mem, Register scratch);

void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
void LoadHalfWordArith(Register dst, const MemOperand& mem,
                       Register scratch = no_reg);
void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);

void LoadByte(Register dst, const MemOperand& mem, Register scratch);
void StoreByte(Register src, const MemOperand& mem, Register scratch);

// Load/store sized according to the given representation |r|.
void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
                        Register scratch = no_reg);
void StoreRepresentation(Register src, const MemOperand& mem,
                         Representation r, Register scratch = no_reg);

// FP load/store helpers; the 'U' variants use the update-form
// instructions, which write the effective address back to the base.
void LoadDouble(DoubleRegister dst, const MemOperand& mem,
                Register scratch = no_reg);
void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
                 Register scratch = no_reg);

void LoadSingle(DoubleRegister dst, const MemOperand& mem,
                Register scratch = no_reg);
void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
                 Register scratch = no_reg);

void StoreDouble(DoubleRegister src, const MemOperand& mem,
                 Register scratch = no_reg);
void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
                  Register scratch = no_reg);

void StoreSingle(DoubleRegister src, const MemOperand& mem,
                 Register scratch = no_reg);
void StoreSingleU(DoubleRegister src, const MemOperand& mem,
                  Register scratch = no_reg);
536
// Move values between integer and floating point registers.
void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
                            Register scratch);
// On 32-bit targets the 64-bit value is supplied as a (src_hi, src) pair.
void MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
                      Register src_hi,
#endif
                      Register src);
#if V8_TARGET_ARCH_PPC64
void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
                                Register src_lo, Register scratch);
#endif
void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
void MovDoubleLowToInt(Register dst, DoubleRegister src);
void MovDoubleHighToInt(Register dst, DoubleRegister src);
void MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
    Register dst_hi,
#endif
    Register dst, DoubleRegister src);
void MovIntToFloat(DoubleRegister dst, Register src);
void MovFloatToInt(Register dst, DoubleRegister src);

// Arithmetic/compare helpers taking an immediate operand; |scratch| may be
// used internally to materialize the value.
void Add(Register dst, Register src, intptr_t value, Register scratch);
void Cmpi(Register src1, const Operand& src2, Register scratch,
          CRegister cr = cr7);
void Cmpli(Register src1, const Operand& src2, Register scratch,
           CRegister cr = cr7);
void Cmpwi(Register src1, const Operand& src2, Register scratch,
           CRegister cr = cr7);
void Cmplwi(Register src1, const Operand& src2, Register scratch,
            CRegister cr = cr7);
void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);

// Smi-literal variants of the helpers above.
void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
                   CRegister cr = cr7);
void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
                    CRegister cr = cr7);
void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
                   RCBit rc = LeaveRC);

// Set new rounding mode RN to FPSCR.
void SetRoundingMode(FPRoundingMode RN);

// Reset rounding mode to default (kRoundToNearest).
void ResetRoundingMode();

// These exist to provide portability between 32 and 64bit:
// pointer-width load/store; 'U' variants use update-form addressing.
void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
595
// ---------------------------------------------------------------------------
// JavaScript invokes

// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
// Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
                        Register caller_args_count_reg, Register scratch0,
                        Register scratch1);

// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
                        const ParameterCount& expected,
                        const ParameterCount& actual, InvokeFlag flag,
                        const CallWrapper& call_wrapper);

// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
                    const ParameterCount& expected,
                    const ParameterCount& actual);

// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
                    const ParameterCount& actual, InvokeFlag flag,
                    const CallWrapper& call_wrapper);

void InvokeFunction(Register function, const ParameterCount& expected,
                    const ParameterCount& actual, InvokeFlag flag,
                    const CallWrapper& call_wrapper);

void InvokeFunction(Handle<JSFunction> function,
                    const ParameterCount& expected,
                    const ParameterCount& actual, InvokeFlag flag,
                    const CallWrapper& call_wrapper);

// Type checks that jump to |fail| when the check does not hold;
// |scratch| is clobbered.
void IsObjectJSStringType(Register object, Register scratch, Label* fail);

void IsObjectNameType(Register object, Register scratch, Label* fail);

// Emit a debug break.
void DebugBreak();
// Frame restart support.
void MaybeDropFrames();

// Exception handling

// Push a new stack handler and link into stack handler chain.
void PushStackHandler();

// Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
void PopStackHandler();
650
// ---------------------------------------------------------------------------
// Inline caching support

// Compute the number hash of the value in |t0|; clobbers |scratch|.
void GetNumberHash(Register t0, Register scratch);

// Emit a marker nop identifying a special location in the code.
inline void MarkCode(NopMarkerTypes type) { nop(type); }

// Check if the given instruction is a 'type' marker.
// i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
// These instructions are generated to mark special location in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
  DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
  return IsNop(instr, type);
}
666
667
GetCodeMarker(Instr instr)668 static inline int GetCodeMarker(Instr instr) {
669 int dst_reg_offset = 12;
670 int dst_mask = 0xf << dst_reg_offset;
671 int src_mask = 0xf;
672 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
673 int src_reg = instr & src_mask;
674 uint32_t non_register_mask = ~(dst_mask | src_mask);
675 uint32_t mov_mask = al | 13 << 21;
676
677 // Return <n> if we have a mov rn rn, else return -1.
678 int type = ((instr & non_register_mask) == mov_mask) &&
679 (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
680 (dst_reg < LAST_CODE_MARKER)
681 ? src_reg
682 : -1;
683 DCHECK((type == -1) ||
684 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
685 return type;
686 }
687
688
// ---------------------------------------------------------------------------
// Allocation support

// Allocate an object in new space or old space. The object_size is
// specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. If the space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as a heap object.
// All registers are clobbered also when control continues at the gc_required
// label.
void Allocate(int object_size, Register result, Register scratch1,
              Register scratch2, Label* gc_required, AllocationFlags flags);

// As above, but the object size is supplied in a register.
void Allocate(Register object_size, Register result, Register result_end,
              Register scratch, Label* gc_required, AllocationFlags flags);

// FastAllocate is right now only used for folded allocations. It just
// increments the top pointer without checking against limit. This can only
// be done if it was proved earlier that the allocation will succeed.
void FastAllocate(int object_size, Register result, Register scratch1,
                  Register scratch2, AllocationFlags flags);

void FastAllocate(Register object_size, Register result, Register result_end,
                  Register scratch, AllocationFlags flags);

// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
                        Register heap_number_map, Label* gc_required,
                        MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
                                 Register scratch1, Register scratch2,
                                 Register heap_number_map,
                                 Label* gc_required);

// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
                     Register scratch1, Register scratch2,
                     Label* gc_required);

// Initialize fields with filler values. |count| fields starting at
// |current_address| are overwritten with the value in |filler|. At the end
// the loop, |current_address| points at the next uninitialized field.
// |count| is assumed to be non-zero.
void InitializeNFieldsWithFiller(Register current_address, Register count,
                                 Register filler);

// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end the loop, |current_address| takes the value of |end_address|.
void InitializeFieldsWithFiller(Register current_address,
                                Register end_address, Register filler);
743
744 // ---------------------------------------------------------------------------
745 // Support functions.
746
747 // Machine code version of Map::GetConstructor().
748 // |temp| holds |result|'s map when done, and |temp2| its instance type.
749 void GetMapConstructor(Register result, Register map, Register temp,
750 Register temp2);
751
752 // Compare object type for heap object. heap_object contains a non-Smi
753 // whose object type should be compared with the given type. This both
754 // sets the flags and leaves the object type in the type_reg register.
755 // It leaves the map in the map register (unless the type_reg and map register
756 // are the same register). It leaves the heap object in the heap_object
757 // register unless the heap_object register is the same register as one of the
758 // other registers.
759 // Type_reg can be no_reg. In that case ip is used.
760 void CompareObjectType(Register heap_object, Register map, Register type_reg,
761 InstanceType type);
762
763 // Compare instance type in a map. map contains a valid map object whose
764 // object type should be compared with the given type. This both
765 // sets the flags and leaves the object type in the type_reg register.
766 void CompareInstanceType(Register map, Register type_reg, InstanceType type);
767
768 // Compare an object's map with the specified map and its transitioned
769 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
770 // set with result of map compare. If multiple map compares are required, the
// compare sequence branches to early_success.
772 void CompareMap(Register obj, Register scratch, Handle<Map> map,
773 Label* early_success);
774
775 // As above, but the map of the object is already loaded into the register
776 // which is preserved by the code generated.
777 void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
778
779 // Check if the map of an object is equal to a specified map and branch to
780 // label if not. Skip the smi check if not required (object is known to be a
781 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
782 // against maps that are ElementsKind transition maps of the specified map.
783 void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
784 SmiCheckType smi_check_type);
785
786
787 void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
788 Label* fail, SmiCheckType smi_check_type);
789
790
791 // Check if the map of an object is equal to a specified weak map and branch
792 // to a specified target if equal. Skip the smi check if not required
793 // (object is known to be a heap object)
794 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
795 Handle<WeakCell> cell, Handle<Code> success,
796 SmiCheckType smi_check_type);
797
798 // Compare the given value and the value of weak cell.
799 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
800 CRegister cr = cr7);
801
802 void GetWeakValue(Register value, Handle<WeakCell> cell);
803
804 // Load the value of the weak cell in the value register. Branch to the given
805 // miss label if the weak cell was cleared.
806 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
807
808 // Compare the object in a register to a value from the root list.
809 // Uses the ip register as scratch.
810 void CompareRoot(Register obj, Heap::RootListIndex index);
// Push the root value at |index| onto the stack.  Clobbers r0, which is
// used as the staging register for the root value.
void PushRoot(Heap::RootListIndex index) {
  LoadRoot(r0, index);
  Push(r0);
}
815
// Compare the object in a register to a value and jump if they are equal.
// Uses ip as scratch (via CompareRoot).
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
  CompareRoot(with, index);
  beq(if_equal);
}
821
// Compare the object in a register to a value and jump if they are not equal.
// Uses ip as scratch (via CompareRoot).
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                   Label* if_not_equal) {
  CompareRoot(with, index);
  bne(if_not_equal);
}
828
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// Returns a condition that will be enabled if the object was a string.
// Clobbers r0 with the masked instance-type bits.
Condition IsObjectStringType(Register obj, Register type) {
  LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
  lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
  andi(r0, type, Operand(kIsNotStringMask));
  // String types have a zero tag, so the andi result is 0 (eq) for strings.
  DCHECK_EQ(0u, kStringTag);
  return eq;
}
839
840 // Get the number of least significant bits from a register
841 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
842 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
843
844 // Load the value of a smi object into a double register.
845 void SmiToDouble(DoubleRegister value, Register smi);
846
847 // Check if a double can be exactly represented as a signed 32-bit integer.
848 // CR_EQ in cr7 is set if true.
849 void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
850 Register scratch2, DoubleRegister double_scratch);
851
852 // Check if a double is equal to -0.0.
853 // CR_EQ in cr7 holds the result.
854 void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
855 Register scratch2);
856
857 // Check the sign of a double.
858 // CR_LT in cr7 holds the result.
859 void TestDoubleSign(DoubleRegister input, Register scratch);
860 void TestHeapNumberSign(Register input, Register scratch);
861
862 // Try to convert a double to a signed 32-bit integer.
863 // CR_EQ in cr7 is set and result assigned if the conversion is exact.
864 void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
865 Register scratch, DoubleRegister double_scratch);
866
867 // Floor a double and writes the value to the result register.
868 // Go to exact if the conversion is exact (to be able to test -0),
869 // fall through calling code if an overflow occurred, else go to done.
870 // In return, input_high is loaded with high bits of input.
871 void TryInt32Floor(Register result, DoubleRegister double_input,
872 Register input_high, Register scratch,
873 DoubleRegister double_scratch, Label* done, Label* exact);
874
875 // Performs a truncating conversion of a floating point number as used by
876 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
877 // succeeds, otherwise falls through if result is saturated. On return
878 // 'result' either holds answer, or is clobbered on fall through.
879 //
880 // Only public for the test code in test-code-stubs-arm.cc.
881 void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
882 Label* done);
883
884 // Performs a truncating conversion of a floating point number as used by
885 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
886 // Exits with 'result' holding the answer.
887 void TruncateDoubleToI(Register result, DoubleRegister double_input);
888
889 // Performs a truncating conversion of a heap number as used by
890 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
891 // must be different registers. Exits with 'result' holding the answer.
892 void TruncateHeapNumberToI(Register result, Register object);
893
894 // Converts the smi or heap number in object to an int32 using the rules
895 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
896 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
897 // different registers.
898 void TruncateNumberToI(Register object, Register result,
899 Register heap_number_map, Register scratch1,
900 Label* not_int32);
901
902 // Overflow handling functions.
903 // Usage: call the appropriate arithmetic function and then call one of the
904 // flow control functions with the corresponding label.
905
906 // Compute dst = left + right, setting condition codes. dst may be same as
907 // either left or right (or a unique register). left and right must not be
908 // the same register.
909 void AddAndCheckForOverflow(Register dst, Register left, Register right,
910 Register overflow_dst, Register scratch = r0);
911 void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
912 Register overflow_dst, Register scratch = r0);
913
914 // Compute dst = left - right, setting condition codes. dst may be same as
915 // either left or right (or a unique register). left and right must not be
916 // the same register.
917 void SubAndCheckForOverflow(Register dst, Register left, Register right,
918 Register overflow_dst, Register scratch = r0);
919
// Branch if the preceding *AndCheckForOverflow call signalled overflow
// (LT in cr0).
void BranchOnOverflow(Label* label) { blt(label, cr0); }
921
// Branch if the preceding *AndCheckForOverflow call signalled no overflow
// (GE in cr0).
void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
923
// Return if the preceding *AndCheckForOverflow call signalled overflow.
void RetOnOverflow(void) { Ret(lt, cr0); }
925
// Return if the preceding *AndCheckForOverflow call signalled no overflow.
void RetOnNoOverflow(void) { Ret(ge, cr0); }
927
928 // ---------------------------------------------------------------------------
929 // Runtime calls
930
931 // Call a code stub.
932 void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
933 Condition cond = al);
934
935 // Call a code stub.
936 void TailCallStub(CodeStub* stub, Condition cond = al);
937
938 // Call a runtime routine.
939 void CallRuntime(const Runtime::Function* f, int num_arguments,
940 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime routine, saving double registers across the call.
// The argument count comes from the runtime function's descriptor.
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  CallRuntime(function, function->nargs, kSaveFPRegs);
}
945
// Convenience function: Same as above, but takes the fid instead.  The
// argument count comes from the runtime function's descriptor.
void CallRuntime(Runtime::FunctionId fid,
                 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  CallRuntime(function, function->nargs, save_doubles);
}
952
// Convenience function: Same as above, but with an explicit argument count
// for runtime functions with a variable number of arguments.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
958
959 // Convenience function: call an external reference.
960 void CallExternalReference(const ExternalReference& ext, int num_arguments);
961
962 // Convenience function: tail call a runtime routine (jump).
963 void TailCallRuntime(Runtime::FunctionId fid);
964
965 int CalculateStackPassedWords(int num_reg_arguments,
966 int num_double_arguments);
967
968 // Before calling a C-function from generated code, align arguments on stack.
969 // After aligning the frame, non-register arguments must be stored in
970 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
971 // are word sized. If double arguments are used, this function assumes that
972 // all double arguments are stored before core registers; otherwise the
973 // correct alignment of the double values is not guaranteed.
974 // Some compilers/platforms require the stack to be aligned when calling
975 // C++ code.
976 // Needs a scratch register to do some arithmetic. This register will be
977 // trashed.
978 void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
979 Register scratch);
980 void PrepareCallCFunction(int num_reg_arguments, Register scratch);
981
982 // There are two ways of passing double arguments on ARM, depending on
983 // whether soft or hard floating point ABI is used. These functions
984 // abstract parameter passing for the three different ways we call
985 // C functions from generated code.
986 void MovToFloatParameter(DoubleRegister src);
987 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
988 void MovToFloatResult(DoubleRegister src);
989
990 // Calls a C function and cleans up the space for arguments allocated
991 // by PrepareCallCFunction. The called function is not allowed to trigger a
992 // garbage collection, since that might move the code and invalidate the
993 // return address (unless this is somehow accounted for by the called
994 // function).
995 void CallCFunction(ExternalReference function, int num_arguments);
996 void CallCFunction(Register function, int num_arguments);
997 void CallCFunction(ExternalReference function, int num_reg_arguments,
998 int num_double_arguments);
999 void CallCFunction(Register function, int num_reg_arguments,
1000 int num_double_arguments);
1001
1002 void MovFromFloatParameter(DoubleRegister dst);
1003 void MovFromFloatResult(DoubleRegister dst);
1004
1005 // Jump to a runtime routine.
1006 void JumpToExternalReference(const ExternalReference& builtin,
1007 bool builtin_exit_frame = false);
1008
// Handle to the code object being generated; must have been initialized
// (non-null) before use.
Handle<Object> CodeObject() {
  DCHECK(!code_object_.is_null());
  return code_object_;
}
1013
1014
1015 // Emit code for a truncating division by a constant. The dividend register is
1016 // unchanged and ip gets clobbered. Dividend and result must be different.
1017 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1018
1019 // ---------------------------------------------------------------------------
1020 // StatsCounter support
1021
1022 void SetCounter(StatsCounter* counter, int value, Register scratch1,
1023 Register scratch2);
1024 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1025 Register scratch2);
1026 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1027 Register scratch2);
1028
1029
1030 // ---------------------------------------------------------------------------
1031 // Debugging
1032
1033 // Calls Abort(msg) if the condition cond is not satisfied.
1034 // Use --debug_code to enable.
1035 void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
1036 void AssertFastElements(Register elements);
1037
1038 // Like Assert(), but always enabled.
1039 void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
1040
1041 // Print a message to stdout and abort execution.
1042 void Abort(BailoutReason reason);
1043
// Verify restrictions about code generated in stubs.
// Mark whether the assembler is currently generating stub code.
void set_generating_stub(bool value) { generating_stub_ = value; }
// True while generating stub code.
bool generating_stub() { return generating_stub_; }
// Record whether the generated code runs with a frame set up.
void set_has_frame(bool value) { has_frame_ = value; }
// True if the generated code runs with a frame set up.
bool has_frame() { return has_frame_; }
1049 inline bool AllowThisStubCall(CodeStub* stub);
1050
1051 // ---------------------------------------------------------------------------
1052 // Number utilities
1053
1054 // Check whether the value of reg is a power of two and not zero. If not
1055 // control continues at the label not_power_of_two. If reg is a power of two
1056 // the register scratch contains the value of (reg - 1) when control falls
1057 // through.
1058 void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
1059 Label* not_power_of_two_or_zero);
1060 // Check whether the value of reg is a power of two and not zero.
1061 // Control falls through if it is, with scratch containing the mask
1062 // value (reg - 1).
1063 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1064 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1065 // strictly positive but not a power of two.
1066 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
1067 Label* zero_and_neg,
1068 Label* not_power_of_two);
1069
1070 // ---------------------------------------------------------------------------
1071 // Bit testing/extraction
1072 //
1073 // Bit numbering is such that the least significant bit is bit 0
1074 // (for consistency between 32/64-bit).
1075
// Extract consecutive bits (defined by rangeStart - rangeEnd) from src
// and, if !test, shift them into the least significant bits of dst.
// Bit numbering: bit 0 is the least significant (see section comment above).
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
                            int rangeEnd, RCBit rc = LeaveRC,
                            bool test = false) {
  DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
  int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
  int width = rangeStart - rangeEnd + 1;
  if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
    // Prefer faster andi when applicable.  andi's immediate only covers the
    // low 16 bits (hence rangeStart < 16), and it leaves the bits unshifted,
    // which is acceptable when rangeEnd == 0 or when only the condition
    // register result matters (test).
    andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
  } else {
#if V8_TARGET_ARCH_PPC64
    // Rotate the range down to bit 0 and clear all higher bits.
    rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
#else
    rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
           rc);
#endif
  }
}
1096
// Extract a single bit (bitNumber) from src into the least significant
// bit of dst.
inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
                       RCBit rc = LeaveRC, bool test = false) {
  ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
}
1101
1102 // Extract consecutive bits (defined by mask) from src and place them
1103 // into the least significant bits of dst.
1104 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
1105 RCBit rc = LeaveRC, bool test = false) {
1106 int start = kBitsPerPointer - 1;
1107 int end;
1108 uintptr_t bit = (1L << start);
1109
1110 while (bit && (mask & bit) == 0) {
1111 start--;
1112 bit >>= 1;
1113 }
1114 end = start;
1115 bit >>= 1;
1116
1117 while (bit && (mask & bit)) {
1118 end--;
1119 bit >>= 1;
1120 }
1121
1122 // 1-bits in mask must be contiguous
1123 DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
1124
1125 ExtractBitRange(dst, src, start, end, rc, test);
1126 }
1127
// Test single bit in value.  Sets cr0; the extracted bit is written to
// |scratch| (defaults to r0).
inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
  ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
}
1132
// Test consecutive bit range in value. Range is defined by
// rangeStart - rangeEnd.  Sets cr0; clobbers |scratch| (defaults to r0).
inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                         Register scratch = r0) {
  ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
}
1139
// Test consecutive bit range in value. Range is defined by mask, whose
// 1-bits must be contiguous.  Sets cr0; clobbers |scratch| (defaults to r0).
inline void TestBitMask(Register value, uintptr_t mask,
                        Register scratch = r0) {
  ExtractBitMask(scratch, value, mask, SetRC, true);
}
1145
1146
1147 // ---------------------------------------------------------------------------
1148 // Smi utilities
1149
// Shift left by kSmiShift (tags |reg| as a smi in place).
void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
// Tag |src| as a smi (shift left by kSmiShift) into |dst|.
void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
  ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
}
1155
1156 #if !V8_TARGET_ARCH_PPC64
1157 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1158 void SmiTagCheckOverflow(Register reg, Register overflow);
1159 void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1160
// Jump to |not_smi_label| if |value| does not fit in a smi (32-bit only).
// Clobbers |scratch|.
inline void JumpIfNotSmiCandidate(Register value, Register scratch,
                                  Label* not_smi_label) {
  // High bits must be identical to fit into a Smi
  STATIC_ASSERT(kSmiShift == 1);
  // Adding 0x40000000 makes the result negative exactly when value lies
  // outside [-2^30, 2^30), i.e. when its top two bits differ.
  addis(scratch, value, Operand(0x40000000u >> 16));
  cmpi(scratch, Operand::Zero());
  blt(not_smi_label);
}
1169 #endif
// Test whether |value|, treated as unsigned, fits in a positive smi.
// Sets cr0 (via the bit-range test); clobbers |scratch|.
inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
  // The test is different for unsigned int values. Since we need
  // the value to be in the range of a positive smi, we can't
  // handle any of the high bits being set in the value.
  TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
               scratch);
}
// Jump to |not_smi_label| if |value|, treated as unsigned, cannot be
// represented as a positive smi.  Clobbers |scratch| and cr0.
inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
                                          Label* not_smi_label) {
  TestUnsignedSmiCandidate(value, scratch);
  bne(not_smi_label, cr0);
}
1182
// Untag the smi in |reg| in place (arithmetic shift right by kSmiShift).
void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
1184
// Untag the smi in |src| (arithmetic shift right by kSmiShift) into |dst|.
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
  ShiftRightArithImm(dst, src, kSmiShift, rc);
}
1188
// Convert a smi index in |src| to a byte offset into an array of
// pointer-size elements (untagged index * kPointerSize) in |dst|.
void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
  // Untag and scale in one shift: the tag shift exceeds the element shift.
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
  ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
#else
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
  ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
}
1198
// A byte offset equals the untagged index, so this is just SmiUntag.
void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
1200
// Convert a smi index in |src| to a byte offset into an array of 2-byte
// elements in |dst|.
void SmiToShortArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
  ShiftRightArithImm(dst, src, kSmiShift - 1);
#else
  // Tag shift and element-size shift cancel out: just copy if needed.
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
  if (!dst.is(src)) {
    mr(dst, src);
  }
#endif
}
1212
// Convert a smi index in |src| to a byte offset into an array of 4-byte
// elements in |dst|.
void SmiToIntArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
  ShiftRightArithImm(dst, src, kSmiShift - 2);
#else
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
  ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
#endif
}
1222
1223 #define SmiToFloatArrayOffset SmiToIntArrayOffset
1224
// Convert a smi index in |src| to a byte offset into an array of double
// (8-byte) elements in |dst|.
void SmiToDoubleArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
  ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
#else
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
  ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
#endif
}
1234
SmiToArrayOffset(Register dst,Register src,int elementSizeLog2)1235 void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
1236 if (kSmiShift < elementSizeLog2) {
1237 ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
1238 } else if (kSmiShift > elementSizeLog2) {
1239 ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
1240 } else if (!dst.is(src)) {
1241 mr(dst, src);
1242 }
1243 }
1244
IndexToArrayOffset(Register dst,Register src,int elementSizeLog2,bool isSmi)1245 void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
1246 bool isSmi) {
1247 if (isSmi) {
1248 SmiToArrayOffset(dst, src, elementSizeLog2);
1249 } else {
1250 ShiftLeftImm(dst, src, Operand(elementSizeLog2));
1251 }
1252 }
1253
1254 // Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
1256 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1257
// Test the smi tag bits of |value|; sets cr0 (eq when |value| is a smi).
// Clobbers |scratch|.
inline void TestIfSmi(Register value, Register scratch) {
  TestBitRange(value, kSmiTagSize - 1, 0, scratch);
}
1261
// Test whether |value| is a non-negative smi: rotate left by one so the
// sign bit joins the tag bits, then mask-and-test them together.
// Sets cr0 (eq when |value| is a positive smi); clobbers |scratch|.
inline void TestIfPositiveSmi(Register value, Register scratch) {
#if V8_TARGET_ARCH_PPC64
  rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
#else
  rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
         kBitsPerPointer - 1, SetRC);
#endif
}
1270
// Jump if the register contains a smi.  Clobbers r0 and cr0.
inline void JumpIfSmi(Register value, Label* smi_label) {
  TestIfSmi(value, r0);
  beq(smi_label, cr0);  // branch if SMI
}
// Jump if the register contains a non-smi.  Clobbers r0 and cr0.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
  TestIfSmi(value, r0);
  bne(not_smi_label, cr0);
}
1281 // Jump if either of the registers contain a non-smi.
1282 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1283 // Jump if either of the registers contain a smi.
1284 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1285
1286 // Abort execution if argument is a number, enabled via --debug-code.
1287 void AssertNotNumber(Register object);
1288
1289 // Abort execution if argument is a smi, enabled via --debug-code.
1290 void AssertNotSmi(Register object);
1291 void AssertSmi(Register object);
1292
1293
#if V8_TARGET_ARCH_PPC64
// Test whether |value| fits in a 32-bit signed integer by comparing it to
// its own sign extension.  Result in |cr| (eq when it fits); clobbers
// |scratch|.
inline void TestIfInt32(Register value, Register scratch,
                        CRegister cr = cr7) {
  // High bits must be identical to fit into a 32-bit integer
  extsw(scratch, value);
  cmp(scratch, value, cr);
}
#else
// 32-bit variant for a 64-bit value held in a register pair: the high
// word must equal the sign extension of the low word.  Result in |cr|
// (eq when it fits); clobbers |scratch|.
inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
                        CRegister cr = cr7) {
  // High bits must be identical to fit into a 32-bit integer
  srawi(scratch, lo_word, 31);
  cmp(scratch, hi_word, cr);
}
#endif
1309
1310 #if V8_TARGET_ARCH_PPC64
// Ensure it is permissible to read/write int value directly from
1312 // upper half of the smi.
1313 STATIC_ASSERT(kSmiTag == 0);
1314 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
1315 #endif
1316 #if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
1317 #define SmiWordOffset(offset) (offset + kPointerSize / 2)
1318 #else
1319 #define SmiWordOffset(offset) offset
1320 #endif
1321
1322 // Abort execution if argument is not a string, enabled via --debug-code.
1323 void AssertString(Register object);
1324
1325 // Abort execution if argument is not a name, enabled via --debug-code.
1326 void AssertName(Register object);
1327
1328 void AssertFunction(Register object);
1329
1330 // Abort execution if argument is not a JSBoundFunction,
1331 // enabled via --debug-code.
1332 void AssertBoundFunction(Register object);
1333
1334 // Abort execution if argument is not a JSGeneratorObject,
1335 // enabled via --debug-code.
1336 void AssertGeneratorObject(Register object);
1337
1338 // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
1339 void AssertReceiver(Register object);
1340
1341 // Abort execution if argument is not undefined or an AllocationSite, enabled
1342 // via --debug-code.
1343 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1344
1345 // Abort execution if reg is not the root value with the given index,
1346 // enabled via --debug-code.
1347 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1348
1349 // ---------------------------------------------------------------------------
1350 // HeapNumber utilities
1351
1352 void JumpIfNotHeapNumber(Register object, Register heap_number_map,
1353 Register scratch, Label* on_not_heap_number);
1354
1355 // ---------------------------------------------------------------------------
1356 // String utilities
1357
1358 // Checks if both objects are sequential one-byte strings and jumps to label
1359 // if either is not. Assumes that neither object is a smi.
1360 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1361 Register object2,
1362 Register scratch1,
1363 Register scratch2,
1364 Label* failure);
1365
1366 // Checks if both objects are sequential one-byte strings and jumps to label
1367 // if either is not.
1368 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1369 Register scratch1,
1370 Register scratch2,
1371 Label* not_flat_one_byte_strings);
1372
1373 // Checks if both instance types are sequential one-byte strings and jumps to
1374 // label if either is not.
1375 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1376 Register first_object_instance_type, Register second_object_instance_type,
1377 Register scratch1, Register scratch2, Label* failure);
1378
1379 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1380
1381 void EmitSeqStringSetCharCheck(Register string, Register index,
1382 Register value, uint32_t encoding_mask);
1383
1384 // ---------------------------------------------------------------------------
1385 // Patching helpers.
1386
1387 // Decode offset from constant pool load instruction(s).
1388 // Caller must place the instruction word at <location> in <result>.
1389 void DecodeConstantPoolOffset(Register result, Register location);
1390
1391 void ClampUint8(Register output_reg, Register input_reg);
1392
1393 // Saturate a value into 8-bit unsigned integer
1394 // if input_value < 0, output_value is 0
1395 // if input_value > 255, output_value is 255
1396 // otherwise output_value is the (int)input_value (round to nearest)
1397 void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
1398 DoubleRegister temp_double_reg);
1399
1400
1401 void LoadInstanceDescriptors(Register map, Register descriptors);
1402 void EnumLength(Register dst, Register map);
1403 void NumberOfOwnDescriptors(Register dst, Register map);
1404 void LoadAccessor(Register dst, Register holder, int accessor_index,
1405 AccessorComponent accessor);
1406
// Extract the bits of |Field| (a BitField-style class exposing kShift and
// kSize) from |src| into the least significant bits of |dst|.
template <typename Field>
void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
  ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
                  rc);
}
1412
// In-place variant of DecodeField.
template <typename Field>
void DecodeField(Register reg, RCBit rc = LeaveRC) {
  DecodeField<Field>(reg, reg, rc);
}
1417
// Extract the bits of |Field| from |src| and store them in |dst| as a
// tagged smi.
template <typename Field>
void DecodeFieldToSmi(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
  DecodeField<Field>(dst, src);
  SmiTag(dst);
#else
  // 32-bit can do this in one instruction: rotate the field so it lands
  // just above the smi shift and mask off everything else.
  int start = Field::kSize + kSmiShift - 1;
  int end = kSmiShift;
  int rotate = kSmiShift - Field::kShift;
  if (rotate < 0) {
    // rlwinm rotate amounts must be non-negative; wrap around.
    rotate += kBitsPerPointer;
  }
  rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
         kBitsPerPointer - end - 1);
#endif
}
1435
// In-place variant of DecodeFieldToSmi.
template <typename Field>
void DecodeFieldToSmi(Register reg) {
  DecodeFieldToSmi<Field>(reg, reg);
}
1440
  // Load the type feedback vector from a JavaScript frame into |vector|.
  void EmitLoadFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);

  // Frame entry/exit for builtins; presumably |context|, |target| and |argc|
  // are saved/restored around the frame -- verify against the .cc definition.
  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

  // Expects object in r3 and returns map with validated enum cache
  // in r3. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to eq.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Register scratch2_reg,
                                       Label* no_memento_found);

  // Loads the constant pool pointer (kConstantPoolRegister).
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
  void LoadConstantPoolPointerRegister();
  // Variant computing the pool address relative to |base|, which is offset by
  // |code_entry_delta| bytes from the code entry address.
  void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
1473
AbortConstantPoolBuilding()1474 void AbortConstantPoolBuilding() {
1475 #ifdef DEBUG
1476 // Avoid DCHECK(!is_linked()) failure in ~Label()
1477 bind(ConstantPoolPosition());
1478 #endif
1479 }
1480
 private:
  // Total shift applied when tagging a value as a Smi: the tag size plus the
  // architecture-dependent payload shift.
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  // Shared implementation behind the public CallCFunction variants.
  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);

  // Jump to an absolute |target| address with relocation mode |rmode|,
  // optionally conditional on |cond| in condition register |cr|.
  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
                          Register mask_reg);

  // Registers (and their count) preserved across safepoints.
  static const RegList kSafepointSavedRegisters;
  static const int kNumSafepointSavedRegisters;

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;  // True while generating a code stub.
  bool has_frame_;        // True when a frame has been set up.
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};
1524
1525
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  // Controls whether the instruction cache is flushed for the patched range
  // when the patcher is destroyed.
  enum FlushICache { FLUSH, DONT_FLUSH };

  // |address| is the start of the code being patched; |instructions| is the
  // exact number of instructions that will be emitted.
  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};
1555
1556
1557 // -----------------------------------------------------------------------------
1558 // Static helper functions.
1559
1560 inline MemOperand ContextMemOperand(Register context, int index = 0) {
1561 return MemOperand(context, Context::SlotOffset(index));
1562 }
1563
1564
NativeContextMemOperand()1565 inline MemOperand NativeContextMemOperand() {
1566 return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
1567 }
1568
1569 #define ACCESS_MASM(masm) masm->
1570
1571 } // namespace internal
1572 } // namespace v8
1573
1574 #endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_
1575