// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
#define V8_PPC_MACRO_ASSEMBLER_PPC_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_r3};
const Register kReturnRegister1 = {Register::kCode_r4};
const Register kReturnRegister2 = {Register::kCode_r5};
const Register kJSFunctionRegister = {Register::kCode_r4};
const Register kContextRegister = {Register::kCode_r30};
const Register kAllocateSizeRegister = {Register::kCode_r4};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r3};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r6};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r4};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r3};

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
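
// Illustrative use (a sketch, not additional API): to load an object's map a
// caller can write
//   LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
// the kHeapObjectTag adjustment above yields the untagged address that is
// actually dereferenced.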


// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag
  DONT_TAG_RESULT
};


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };


Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);


#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                Register reg4 = no_reg, Register reg5 = no_reg,
                Register reg6 = no_reg, Register reg7 = no_reg,
                Register reg8 = no_reg, Register reg9 = no_reg,
                Register reg10 = no_reg);
#endif

// These exist to provide portability between 32-bit and 64-bit code
#if V8_TARGET_ARCH_PPC64
#define LoadPX ldx
#define LoadPUX ldux
#define StorePX stdx
#define StorePUX stdux
#define ShiftLeftImm sldi
#define ShiftRightImm srdi
#define ClearLeftImm clrldi
#define ClearRightImm clrrdi
#define ShiftRightArithImm sradi
#define ShiftLeft_ sld
#define ShiftRight_ srd
#define ShiftRightArith srad
#define Mul mulld
#define Div divd
#else
#define LoadPX lwzx
#define LoadPUX lwzux
#define StorePX stwx
#define StorePUX stwux
#define ShiftLeftImm slwi
#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
#define ClearRightImm clrrwi
#define ShiftRightArithImm srawi
#define ShiftLeft_ slw
#define ShiftRight_ srw
#define ShiftRightArith sraw
#define Mul mullw
#define Div divw
#endif
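
// For example (sketch only): a pointer-sized shift written once as
//   ShiftLeftImm(dst, src, Operand(kPointerSizeLog2));
// assembles to sldi on PPC64 and to slwi on 32-bit PPC; dst and src here are
// placeholders for whatever registers the caller uses.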


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);


  // Returns the size of a call in instructions. Note, the value returned is
  // only valid as long as no entries are added to the constant pool between
  // checking the call size and emitting the actual call.
  static int CallSize(Register target);
  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
  static int CallSizeNotPredictableCodeSize(Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target);
  void JumpToJSEntry(Register target);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Register target);
  void CallJSEntry(Register target);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               Condition cond = al);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            Condition cond = al);
  void Ret() { blr(); }
  void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }

  // Emit code that loads |parameter_index|'th parameter from the stack to
  // the register according to the CallInterfaceDescriptor definition.
  // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
  // below the caller's sp.
  template <class Descriptor>
  void LoadParameterFromStack(
      Register reg, typename Descriptor::ParameterIndices parameter_index,
      int sp_to_ra_offset_in_words = 0) {
    DCHECK(Descriptor::kPassLastArgsOnStack);
    UNIMPLEMENTED();
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count);
  void Drop(Register count, Register scratch = r0);

  void Ret(int drop) {
    Drop(drop);
    blr();
  }

  void Call(Label* target);

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
  void Move(Register dst, Handle<Object> value);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(DoubleRegister dst, DoubleRegister src);

  void MultiPush(RegList regs, Register location = sp);
  void MultiPop(RegList regs, Register location = sp);

  void MultiPushDoubles(RegList dregs, Register location = sp);
  void MultiPopDoubles(RegList dregs, Register location = sp);

  // Load an object from the root table.
  void LoadRoot(Register destination, Heap::RootListIndex index,
                Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source, Heap::RootListIndex index,
                 Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object, Register value,
                                           Register address);

  enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register.  Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr, Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met);

  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object, Register scratch0, Register scratch1,
                Label* has_color, int first_bit, int second_bit);

  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
                   Label* on_black);

  // Checks the color of an object.  If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Register scratch3, Label* value_is_white);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted.  For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context, int offset, Register value, Register scratch,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
                     lr_status, save_fp, remembered_set_action, smi_check,
                     pointers_to_here_check_for_value);
  }

  // Notify the garbage collector that we wrote a code entry into a
  // JSFunction. Only scratch is clobbered by the operation.
  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
                                 Register scratch);

  void RecordWriteForMap(Register object, Register map, Register dst,
                         LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written.  |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object, Register address, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  void Push(Register src) { push(src); }

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    StorePU(src2, MemOperand(sp, -2 * kPointerSize));
    StoreP(src1, MemOperand(sp, kPointerSize));
  }
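
  // For instance (sketch), after Push(r3, r4) the stack layout is:
  //   sp + kPointerSize -> r3   (pushed first, higher address)
  //   sp + 0            -> r4   (pushed last, lower address)
  // matching the "leftmost register first" convention noted above.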

  // Push three registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    StorePU(src3, MemOperand(sp, -3 * kPointerSize));
    StoreP(src2, MemOperand(sp, kPointerSize));
    StoreP(src1, MemOperand(sp, 2 * kPointerSize));
  }

  // Push four registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    StorePU(src4, MemOperand(sp, -4 * kPointerSize));
    StoreP(src3, MemOperand(sp, kPointerSize));
    StoreP(src2, MemOperand(sp, 2 * kPointerSize));
    StoreP(src1, MemOperand(sp, 3 * kPointerSize));
  }

  // Push five registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    StorePU(src5, MemOperand(sp, -5 * kPointerSize));
    StoreP(src4, MemOperand(sp, kPointerSize));
    StoreP(src3, MemOperand(sp, 2 * kPointerSize));
    StoreP(src2, MemOperand(sp, 3 * kPointerSize));
    StoreP(src1, MemOperand(sp, 4 * kPointerSize));
  }

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    LoadP(src2, MemOperand(sp, 0));
    LoadP(src1, MemOperand(sp, kPointerSize));
    addi(sp, sp, Operand(2 * kPointerSize));
  }

  // Pop three registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    LoadP(src3, MemOperand(sp, 0));
    LoadP(src2, MemOperand(sp, kPointerSize));
    LoadP(src1, MemOperand(sp, 2 * kPointerSize));
    addi(sp, sp, Operand(3 * kPointerSize));
  }

  // Pop four registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4) {
    LoadP(src4, MemOperand(sp, 0));
    LoadP(src3, MemOperand(sp, kPointerSize));
    LoadP(src2, MemOperand(sp, 2 * kPointerSize));
    LoadP(src1, MemOperand(sp, 3 * kPointerSize));
    addi(sp, sp, Operand(4 * kPointerSize));
  }

  // Pop five registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Register src5) {
    LoadP(src5, MemOperand(sp, 0));
    LoadP(src4, MemOperand(sp, kPointerSize));
    LoadP(src3, MemOperand(sp, 2 * kPointerSize));
    LoadP(src2, MemOperand(sp, 3 * kPointerSize));
    LoadP(src1, MemOperand(sp, 4 * kPointerSize));
    addi(sp, sp, Operand(5 * kPointerSize));
  }

  // Push a fixed frame, consisting of lr, fp, constant pool.
  void PushCommonFrame(Register marker_reg = no_reg);

  // Push a standard frame, consisting of lr, fp, constant pool,
  // context and JS function
  void PushStandardFrame(Register function_reg);

  void PopCommonFrame(Register marker_reg = no_reg);

  // Restore caller's frame pointer and return address prior to being
  // overwritten by tail call stack preparation.
  void RestoreFrameStateForTailCall();

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
  // from C.
  // Does not handle errors.
  void FlushICache(Register address, size_t size, Register scratch);

  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
  void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
  void CanonicalizeNaN(const DoubleRegister value) {
    CanonicalizeNaN(value, value);
  }

  // Converts the integer (untagged smi) in |src| to a double, storing
  // the result to |dst|
  void ConvertIntToDouble(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a double, storing the result to |dst|
  void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);

  // Converts the integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertIntToFloat(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);

#if V8_TARGET_ARCH_PPC64
  void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif

  // Converts the double_input to an integer.  Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  void ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                            const Register dst_hi,
#endif
                            const Register dst, const DoubleRegister double_dst,
                            FPRoundingMode rounding_mode = kRoundToZero);

#if V8_TARGET_ARCH_PPC64
  // Converts the double_input to an unsigned integer.  Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  void ConvertDoubleToUnsignedInt64(
      const DoubleRegister double_input, const Register dst,
      const DoubleRegister double_dst,
      FPRoundingMode rounding_mode = kRoundToZero);
#endif

#if !V8_TARGET_ARCH_PPC64
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, Register scratch, Register shift);
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, uint32_t shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, Register scratch, Register shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, uint32_t shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, Register scratch, Register shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, uint32_t shift);
#endif

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type, Register base = no_reg,
                    int prologue_offset = 0);
  void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);

  // Enter exit frame.
  // stack_space - extra stack space, used for parameters before call to C.
  // At least one slot (for the return address) should be provided.
  void EnterExitFrame(bool save_doubles, int stack_space = 1,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame. Expects the return value in r0.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool restore_context,
                      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void LoadContext(Register dst, int context_chain_length);

  // Load the global object from the current context.
  void LoadGlobalObject(Register dst) {
    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
  }

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
                                           ElementsKind transitioned_kind,
                                           Register map_in_out,
                                           Register scratch,
                                           Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function, Register map,
                                    Register scratch);

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
  }

  // ----------------------------------------------------------------
  // new PPC macro-assembler interfaces that are slightly higher level
  // than assembler-ppc and may generate variable length sequences

  // load a literal signed int value <value> to GPR <dst>
  void LoadIntLiteral(Register dst, int value);

  // load an SMI value <value> to GPR <dst>
  void LoadSmiLiteral(Register dst, Smi* smi);

  // load a literal double value <value> to FPR <result>
  void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);

  void LoadWord(Register dst, const MemOperand& mem, Register scratch);
  void LoadWordArith(Register dst, const MemOperand& mem,
                     Register scratch = no_reg);
  void StoreWord(Register src, const MemOperand& mem, Register scratch);

  void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
  void LoadHalfWordArith(Register dst, const MemOperand& mem,
                         Register scratch = no_reg);
  void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);

  void LoadByte(Register dst, const MemOperand& mem, Register scratch);
  void StoreByte(Register src, const MemOperand& mem, Register scratch);

  void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
                          Register scratch = no_reg);
  void StoreRepresentation(Register src, const MemOperand& mem,
                           Representation r, Register scratch = no_reg);

  void LoadDouble(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);

  void LoadSingle(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);

  void StoreDouble(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  void StoreSingle(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreSingleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  // Move values between integer and floating point registers.
  void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
  void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
                              Register scratch);
  void MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
                        Register src_hi,
#endif
                        Register src);
#if V8_TARGET_ARCH_PPC64
  void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
                                  Register src_lo, Register scratch);
#endif
  void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
  void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
  void MovDoubleLowToInt(Register dst, DoubleRegister src);
  void MovDoubleHighToInt(Register dst, DoubleRegister src);
  void MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      Register dst_hi,
#endif
      Register dst, DoubleRegister src);
  void MovIntToFloat(DoubleRegister dst, Register src);
  void MovFloatToInt(Register dst, DoubleRegister src);

  void Add(Register dst, Register src, intptr_t value, Register scratch);
  void Cmpi(Register src1, const Operand& src2, Register scratch,
            CRegister cr = cr7);
  void Cmpli(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  void Cmpwi(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  void Cmplwi(Register src1, const Operand& src2, Register scratch,
              CRegister cr = cr7);
  void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);

  void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
  void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
  void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
                     CRegister cr = cr7);
  void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
                      CRegister cr = cr7);
  void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
                     RCBit rc = LeaveRC);

  // Set new rounding mode RN to FPSCR
  void SetRoundingMode(FPRoundingMode RN);

  // reset rounding mode to default (kRoundToNearest)
  void ResetRoundingMode();

  // These exist to provide portability between 32-bit and 64-bit code
  void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
  void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag,
                          const CallWrapper& call_wrapper);

  void FloodFunctionIfStepping(Register fun, Register new_target,
                               const ParameterCount& expected,
                               const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void IsObjectJSStringType(Register object, Register scratch, Label* fail);

  void IsObjectNameType(Register object, Register scratch, Label* fail);

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  void GetNumberHash(Register t0, Register scratch);

  inline void MarkCode(NopMarkerTypes type) { nop(type); }

  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
  // These instructions are generated to mark special locations in the code,
  // like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  static inline int GetCodeMarker(Instr instr) {
    int dst_reg_offset = 12;
    int dst_mask = 0xf << dst_reg_offset;
    int src_mask = 0xf;
    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
    int src_reg = instr & src_mask;
    uint32_t non_register_mask = ~(dst_mask | src_mask);
    uint32_t mov_mask = al | 13 << 21;

    // Return <n> if we have a mov rn rn, else return -1.
    int type = ((instr & non_register_mask) == mov_mask) &&
                       (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
                       (dst_reg < LAST_CODE_MARKER)
                   ? src_reg
                   : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size, Register result, Register scratch1,
                Register scratch2, Label* gc_required, AllocationFlags flags);

  void Allocate(Register object_size, Register result, Register result_end,
                Register scratch, Label* gc_required, AllocationFlags flags);

  // FastAllocate is right now only used for folded allocations. It just
  // increments the top pointer without checking against limit. This can only
  // be done if it was proved earlier that the allocation will succeed.
  void FastAllocate(int object_size, Register result, Register scratch1,
                    Register scratch2, AllocationFlags flags);

  void FastAllocate(Register object_size, Register result, Register result_end,
                    Register scratch, AllocationFlags flags);

  void AllocateTwoByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
                          Register heap_number_map, Label* gc_required,
                          MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
                                   Register scratch1, Register scratch2,
                                   Register heap_number_map,
                                   Label* gc_required);

  // Allocate and initialize a JSValue wrapper with the specified {constructor}
  // and {value}.
  void AllocateJSValue(Register result, Register constructor, Register value,
                       Register scratch1, Register scratch2,
                       Label* gc_required);

  // Initialize fields with filler values.  |count| fields starting at
  // |current_address| are overwritten with the value in |filler|.  At the end
  // of the loop, |current_address| points at the next uninitialized field.
  // |count| is assumed to be non-zero.
  void InitializeNFieldsWithFiller(Register current_address, Register count,
                                   Register filler);

  // Initialize fields with filler values.  Fields starting at |current_address|
  // not including |end_address| are overwritten with the value in |filler|.  At
  // the end of the loop, |current_address| takes the value of |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements.  Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Register scratch, Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements.  Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map, Register scratch, Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
                                   Register elements_reg, Register scratch1,
                                   DoubleRegister double_scratch, Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
  // set with result of map compare. If multiple map compares are required, the
  // compare sequences branches to early_success.
  void CompareMap(Register obj, Register scratch, Handle<Map> map,
                  Label* early_success);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
                SmiCheckType smi_check_type);


  void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
                Label* fail, SmiCheckType smi_check_type);


  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object)
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Compare the given value and the value of weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
                    CRegister cr = cr7);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index) {
    LoadRoot(r0, index);
    Push(r0);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
    CompareRoot(with, index);
    beq(if_equal);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal) {
    CompareRoot(with, index);
    bne(if_not_equal);
  }

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj, Register type) {
    LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    andi(r0, type, Operand(kIsNotStringMask));
    DCHECK_EQ(0u, kStringTag);
    return eq;
  }
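
  // Typical use (sketch): the andi above sets cr0, so after
  //   IsObjectStringType(obj, type);
  //   bne(&not_string, cr0);
  // control falls through only when obj holds a string (the register and
  // label names here are illustrative).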

  // Get the number of least significant bits from a register
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Load the value of a smi object into a double register.
  void SmiToDouble(DoubleRegister value, Register smi);

  // Check if a double can be exactly represented as a signed 32-bit integer.
  // CR_EQ in cr7 is set if true.
  void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
                         Register scratch2, DoubleRegister double_scratch);

  // Check if a double is equal to -0.0.
  // CR_EQ in cr7 holds the result.
  void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
                             Register scratch2);

  // Check the sign of a double.
  // CR_LT in cr7 holds the result.
  void TestDoubleSign(DoubleRegister input, Register scratch);
  void TestHeapNumberSign(Register input, Register scratch);

  // Try to convert a double to a signed 32-bit integer.
  // CR_EQ in cr7 is set and result assigned if the conversion is exact.
  void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
                             Register scratch, DoubleRegister double_scratch);

  // Floors a double and writes the value to the result register.
  // Goes to exact if the conversion is exact (to be able to test -0),
  // falls through to calling code if an overflow occurred, else goes to done.
  // On return, input_high is loaded with the high bits of input.
  void TryInt32Floor(Register result, DoubleRegister double_input,
                     Register input_high, Register scratch,
                     DoubleRegister double_scratch, Label* done, Label* exact);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers.  Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMA-262 9.5: the value is truncated
950   // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
951   // different registers.
952   void TruncateNumberToI(Register object, Register result,
953                          Register heap_number_map, Register scratch1,
954                          Label* not_int32);
955 
956   // Overflow handling functions.
957   // Usage: call the appropriate arithmetic function and then call one of the
958   // flow control functions with the corresponding label.
959 
960   // Compute dst = left + right, setting condition codes. dst may be same as
961   // either left or right (or a unique register). left and right must not be
962   // the same register.
963   void AddAndCheckForOverflow(Register dst, Register left, Register right,
964                               Register overflow_dst, Register scratch = r0);
965   void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
966                               Register overflow_dst, Register scratch = r0);
967 
968   // Compute dst = left - right, setting condition codes. dst may be same as
969   // either left or right (or a unique register). left and right must not be
970   // the same register.
971   void SubAndCheckForOverflow(Register dst, Register left, Register right,
972                               Register overflow_dst, Register scratch = r0);
973 
BranchOnOverflow(Label * label)974   void BranchOnOverflow(Label* label) { blt(label, cr0); }
975 
BranchOnNoOverflow(Label * label)976   void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
977 
RetOnOverflow(void)978   void RetOnOverflow(void) { Ret(lt, cr0); }
979 
RetOnNoOverflow(void)980   void RetOnNoOverflow(void) { Ret(ge, cr0); }
981 
982   // ---------------------------------------------------------------------------
983   // Runtime calls
984 
985   // Call a code stub.
986   void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
987                 Condition cond = al);
988 
989   // Call a code stub.
990   void TailCallStub(CodeStub* stub, Condition cond = al);
991 
992   // Call a runtime routine.
993   void CallRuntime(const Runtime::Function* f, int num_arguments,
994                    SaveFPRegsMode save_doubles = kDontSaveFPRegs);
CallRuntimeSaveDoubles(Runtime::FunctionId fid)995   void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
996     const Runtime::Function* function = Runtime::FunctionForId(fid);
997     CallRuntime(function, function->nargs, kSaveFPRegs);
998   }
999 
1000   // Convenience function: Same as above, but takes the fid instead.
1001   void CallRuntime(Runtime::FunctionId fid,
1002                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1003     const Runtime::Function* function = Runtime::FunctionForId(fid);
1004     CallRuntime(function, function->nargs, save_doubles);
1005   }
1006 
1007   // Convenience function: Same as above, but takes the fid instead.
1008   void CallRuntime(Runtime::FunctionId fid, int num_arguments,
1009                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1010     CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
1011   }
1012 
1013   // Convenience function: call an external reference.
1014   void CallExternalReference(const ExternalReference& ext, int num_arguments);
1015 
1016   // Convenience function: tail call a runtime routine (jump).
1017   void TailCallRuntime(Runtime::FunctionId fid);
1018 
1019   int CalculateStackPassedWords(int num_reg_arguments,
1020                                 int num_double_arguments);
1021 
1022   // Before calling a C-function from generated code, align arguments on stack.
1023   // After aligning the frame, non-register arguments must be stored in
1024   // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1025   // are word sized. If double arguments are used, this function assumes that
1026   // all double arguments are stored before core registers; otherwise the
1027   // correct alignment of the double values is not guaranteed.
1028   // Some compilers/platforms require the stack to be aligned when calling
1029   // C++ code.
1030   // Needs a scratch register to do some arithmetic. This register will be
1031   // trashed.
1032   void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
1033                             Register scratch);
1034   void PrepareCallCFunction(int num_reg_arguments, Register scratch);
1035 
1036   // There are two ways of passing double arguments on ARM, depending on
1037   // whether soft or hard floating point ABI is used. These functions
1038   // abstract parameter passing for the three different ways we call
1039   // C functions from generated code.
1040   void MovToFloatParameter(DoubleRegister src);
1041   void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
1042   void MovToFloatResult(DoubleRegister src);
1043 
1044   // Calls a C function and cleans up the space for arguments allocated
1045   // by PrepareCallCFunction. The called function is not allowed to trigger a
1046   // garbage collection, since that might move the code and invalidate the
1047   // return address (unless this is somehow accounted for by the called
1048   // function).
1049   void CallCFunction(ExternalReference function, int num_arguments);
1050   void CallCFunction(Register function, int num_arguments);
1051   void CallCFunction(ExternalReference function, int num_reg_arguments,
1052                      int num_double_arguments);
1053   void CallCFunction(Register function, int num_reg_arguments,
1054                      int num_double_arguments);
1055 
1056   void MovFromFloatParameter(DoubleRegister dst);
1057   void MovFromFloatResult(DoubleRegister dst);
1058 
1059   // Jump to a runtime routine.
1060   void JumpToExternalReference(const ExternalReference& builtin,
1061                                bool builtin_exit_frame = false);
1062 
CodeObject()1063   Handle<Object> CodeObject() {
1064     DCHECK(!code_object_.is_null());
1065     return code_object_;
1066   }
1067 
1068 
1069   // Emit code for a truncating division by a constant. The dividend register is
1070   // unchanged and ip gets clobbered. Dividend and result must be different.
1071   void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1072 
1073   // ---------------------------------------------------------------------------
1074   // StatsCounter support
1075 
1076   void SetCounter(StatsCounter* counter, int value, Register scratch1,
1077                   Register scratch2);
1078   void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1079                         Register scratch2);
1080   void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1081                         Register scratch2);
1082 
1083 
1084   // ---------------------------------------------------------------------------
1085   // Debugging
1086 
1087   // Calls Abort(msg) if the condition cond is not satisfied.
1088   // Use --debug_code to enable.
1089   void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
1090   void AssertFastElements(Register elements);
1091 
1092   // Like Assert(), but always enabled.
1093   void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
1094 
1095   // Print a message to stdout and abort execution.
1096   void Abort(BailoutReason reason);
1097 
1098   // Verify restrictions about code generated in stubs.
set_generating_stub(bool value)1099   void set_generating_stub(bool value) { generating_stub_ = value; }
generating_stub()1100   bool generating_stub() { return generating_stub_; }
set_has_frame(bool value)1101   void set_has_frame(bool value) { has_frame_ = value; }
has_frame()1102   bool has_frame() { return has_frame_; }
1103   inline bool AllowThisStubCall(CodeStub* stub);
1104 
1105   // ---------------------------------------------------------------------------
1106   // Number utilities
1107 
1108   // Check whether the value of reg is a power of two and not zero. If not
1109   // control continues at the label not_power_of_two. If reg is a power of two
1110   // the register scratch contains the value of (reg - 1) when control falls
1111   // through.
1112   void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
1113                                  Label* not_power_of_two_or_zero);
1114   // Check whether the value of reg is a power of two and not zero.
1115   // Control falls through if it is, with scratch containing the mask
1116   // value (reg - 1).
1117   // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1118   // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1119   // strictly positive but not a power of two.
1120   void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
1121                                        Label* zero_and_neg,
1122                                        Label* not_power_of_two);

  // ---------------------------------------------------------------------------
  // Bit testing/extraction
  //
  // Bit numbering is such that the least significant bit is bit 0
  // (for consistency between 32/64-bit).

  // Extract consecutive bits (from rangeStart down to rangeEnd, inclusive)
  // from src and, if !test, shift them into the least significant bits of dst.
  inline void ExtractBitRange(Register dst, Register src, int rangeStart,
                              int rangeEnd, RCBit rc = LeaveRC,
                              bool test = false) {
    DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
    int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
    int width = rangeStart - rangeEnd + 1;
    if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
      // Prefer faster andi when applicable.
      andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
    } else {
#if V8_TARGET_ARCH_PPC64
      rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
#else
      rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
             rc);
#endif
    }
  }
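
  // Usage sketch (illustrative; registers are placeholders): extract bits 7..4
  // of r4 into the low bits of r3, i.e. r3 = (r4 >> 4) & 0xF:
  //   __ ExtractBitRange(r3, r4, 7, 4);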

  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
                         RCBit rc = LeaveRC, bool test = false) {
    ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
  }

  // Extract consecutive bits (defined by mask) from src and place them
  // into the least significant bits of dst.
  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
                             RCBit rc = LeaveRC, bool test = false) {
    int start = kBitsPerPointer - 1;
    int end;
    uintptr_t bit = (1L << start);

    while (bit && (mask & bit) == 0) {
      start--;
      bit >>= 1;
    }
    end = start;
    bit >>= 1;

    while (bit && (mask & bit)) {
      end--;
      bit >>= 1;
    }

    // 1-bits in mask must be contiguous
    DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);

    ExtractBitRange(dst, src, start, end, rc, test);
  }
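
  // Usage sketch (illustrative; registers are placeholders): the mask must
  // consist of contiguous 1-bits, e.g. 0xF0 extracts the same field as the
  // ExtractBitRange example above:
  //   __ ExtractBitMask(r3, r4, 0xF0);  // r3 = (r4 >> 4) & 0xF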

  // Test single bit in value.
  inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
    ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
  }

  // Test consecutive bit range in value.  Range is defined by
  // rangeStart - rangeEnd.
  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                           Register scratch = r0) {
    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
  }

  // Test consecutive bit range in value.  Range is defined by mask.
  inline void TestBitMask(Register value, uintptr_t mask,
                          Register scratch = r0) {
    ExtractBitMask(scratch, value, mask, SetRC, true);
  }
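
  // Usage sketch (illustrative; the register and label are placeholders): the
  // Test* helpers set condition register cr0 via the record bit, so branch on
  // cr0 afterwards:
  //   Label bit_set;
  //   __ TestBit(r4, 0);
  //   __ bne(&bit_set, cr0);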


  // ---------------------------------------------------------------------------
  // Smi utilities

  // Shift left by kSmiShift
  void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
  void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
    ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
  }

#if !V8_TARGET_ARCH_PPC64
  // Smi-tag the value and detect overflow: afterwards, overflow < 0 if and
  // only if the value did not fit into a smi (test with a signed comparison
  // against zero).
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  inline void JumpIfNotSmiCandidate(Register value, Register scratch,
                                    Label* not_smi_label) {
    // High bits must be identical to fit into a Smi
    STATIC_ASSERT(kSmiShift == 1);
    addis(scratch, value, Operand(0x40000000u >> 16));
    cmpi(scratch, Operand::Zero());
    blt(not_smi_label);
  }
#endif
  inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
    // The test is different for unsigned int values. Since we need
    // the value to be in the range of a positive smi, we can't
    // handle any of the high bits being set in the value.
    TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
                 scratch);
  }
  inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
                                            Label* not_smi_label) {
    TestUnsignedSmiCandidate(value, scratch);
    bne(not_smi_label, cr0);
  }

  void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }

  void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
    ShiftRightArithImm(dst, src, kSmiShift, rc);
  }
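
  // Usage sketch (illustrative; registers are placeholders): tag an integer as
  // a smi and untag it again:
  //   __ SmiTag(r3, r4);    // r3 = r4 << kSmiShift
  //   __ SmiUntag(r5, r3);  // r5 = r3 >> kSmiShift (arithmetic), i.e. r4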

  void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
    ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
    ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
  }

  void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }

  void SmiToShortArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
    ShiftRightArithImm(dst, src, kSmiShift - 1);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
    if (!dst.is(src)) {
      mr(dst, src);
    }
#endif
  }

  void SmiToIntArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
    ShiftRightArithImm(dst, src, kSmiShift - 2);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
    ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
#endif
  }

#define SmiToFloatArrayOffset SmiToIntArrayOffset

  void SmiToDoubleArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
    ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
    ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
#endif
  }

  void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
    if (kSmiShift < elementSizeLog2) {
      ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
    } else if (kSmiShift > elementSizeLog2) {
      ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
    } else if (!dst.is(src)) {
      mr(dst, src);
    }
  }
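
  // Usage sketch (illustrative; registers are placeholders): index a
  // FixedArray with a smi index by first scaling the index to a byte offset:
  //   __ SmiToPtrArrayOffset(ip, r4);  // r4 holds the smi index
  //   __ add(ip, ip, r6);              // r6 points to the FixedArray
  //   __ LoadP(r3, FieldMemOperand(ip, FixedArray::kHeaderSize));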

  void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
                          bool isSmi) {
    if (isSmi) {
      SmiToArrayOffset(dst, src, elementSizeLog2);
    } else {
      ShiftLeftImm(dst, src, Operand(elementSizeLog2));
    }
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  inline void TestIfSmi(Register value, Register scratch) {
    TestBitRange(value, kSmiTagSize - 1, 0, scratch);
  }

  inline void TestIfPositiveSmi(Register value, Register scratch) {
#if V8_TARGET_ARCH_PPC64
    rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
#else
    rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
           kBitsPerPointer - 1, SetRC);
#endif
  }

  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    TestIfSmi(value, r0);
    beq(smi_label, cr0);  // branch if SMI
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    TestIfSmi(value, r0);
    bne(not_smi_label, cr0);
  }
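
  // Usage sketch (illustrative; the register and labels are placeholders):
  //   Label is_smi, done;
  //   __ JumpIfSmi(r3, &is_smi);
  //   // ... heap object case ...
  //   __ b(&done);
  //   __ bind(&is_smi);
  //   // ... smi case ...
  //   __ bind(&done);
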
  // Jump if either of the registers contains a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a number, enabled via --debug-code.
  void AssertNotNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);


#if V8_TARGET_ARCH_PPC64
  inline void TestIfInt32(Register value, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer
    extsw(scratch, value);
    cmp(scratch, value, cr);
  }
#else
  inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer
    srawi(scratch, lo_word, 31);
    cmp(scratch, hi_word, cr);
  }
#endif

#if V8_TARGET_ARCH_PPC64
  // Ensure it is permissible to read/write the int value directly from the
  // upper half of the smi.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
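
  // Usage sketch (illustrative; the object register and field are
  // placeholders): on 64-bit targets the int32 payload of an in-object smi
  // field can be read directly with a 32-bit load by adjusting the field
  // offset with SmiWordOffset:
  //   __ lwz(r3, FieldMemOperand(r4, SmiWordOffset(JSArray::kLengthOffset)));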

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject,
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities

  void JumpIfNotHeapNumber(Register object, Register heap_number_map,
                           Register scratch, Label* on_not_heap_number);

  // ---------------------------------------------------------------------------
  // String utilities

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
                                                    Register object2,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string, Register index,
                                 Register value, uint32_t encoding_mask);

  // ---------------------------------------------------------------------------
  // Patching helpers.

  // Decode offset from constant pool load instruction(s).
  // Caller must place the instruction word at <location> in <result>.
  void DecodeConstantPoolOffset(Register result, Register location);

  void ClampUint8(Register output_reg, Register input_reg);

  // Saturate a value into an 8-bit unsigned integer:
  //   if input_value < 0, output_value is 0
  //   if input_value > 255, output_value is 255
  //   otherwise output_value is (int)input_value (rounded to nearest)
  void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);


  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template <typename Field>
  void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
    ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
                    rc);
  }

  template <typename Field>
  void DecodeField(Register reg, RCBit rc = LeaveRC) {
    DecodeField<Field>(reg, reg, rc);
  }

  template <typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    DecodeField<Field>(dst, src);
    SmiTag(dst);
#else
    // 32-bit can do this in one instruction:
    int start = Field::kSize + kSmiShift - 1;
    int end = kSmiShift;
    int rotate = kSmiShift - Field::kShift;
    if (rotate < 0) {
      rotate += kBitsPerPointer;
    }
    rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
           kBitsPerPointer - end - 1);
#endif
  }

  template <typename Field>
  void DecodeFieldToSmi(Register reg) {
    DecodeFieldToSmi<Field>(reg, reg);
  }
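
  // Usage sketch (illustrative; registers are placeholders): decode a BitField
  // such as the elements kind stored in a map's bit field 2:
  //   __ lbz(r0, FieldMemOperand(r4, Map::kBitField2Offset));
  //   __ DecodeField<Map::ElementsKindBits>(r0);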

  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadTypeFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);

  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

  // Expects object in r3 and returns map with validated enum cache
  // in r3.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to eq.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Register scratch2_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Register scratch2_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
                                    &no_memento_found);
    beq(memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // Loads the constant pool pointer (kConstantPoolRegister).
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
  void LoadConstantPoolPointerRegister();
  void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);

  void AbortConstantPoolBuilding() {
#ifdef DEBUG
    // Avoid DCHECK(!is_linked()) failure in ~Label()
    bind(ConstantPoolPosition());
#endif
  }

 private:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InitializeNewString(Register string, Register length,
                           Heap::RootListIndex map_index, Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the mask selecting the first mark bit.  Leaves addr_reg
  // unchanged.
  inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
                          Register mask_reg);

  static const RegList kSafepointSavedRegisters;
  static const int kNumSafepointSavedRegisters;

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache { FLUSH, DONT_FLUSH };

  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;            // The address of the code being patched.
  int size_;                 // Number of bytes of the expected patch size.
  MacroAssembler masm_;      // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};
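
// Usage sketch (illustrative; the isolate, address and registers are
// placeholders): patch exactly two instructions in place and flush the
// instruction cache afterwards:
//   CodePatcher patcher(isolate, patch_address, 2);
//   patcher.masm()->mr(r4, r3);
//   patcher.masm()->blr();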


// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}


inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
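
// Usage sketch (illustrative; the destination register is a placeholder): load
// a native context slot by first loading the native context via cp:
//   __ LoadP(r4, NativeContextMemOperand());
//   __ LoadP(r4, ContextMemOperand(r4, Context::ARRAY_FUNCTION_INDEX));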

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_PPC_MACRO_ASSEMBLER_PPC_H_