1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <limits.h>  // For LONG_MIN, LONG_MAX.
6 
7 #include "src/v8.h"
8 
9 #if V8_TARGET_ARCH_MIPS64
10 
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/cpu-profiler.h"
15 #include "src/debug.h"
16 #include "src/isolate-inl.h"
17 #include "src/runtime.h"
18 
19 namespace v8 {
20 namespace internal {
21 
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
23     : Assembler(arg_isolate, buffer, size),
24       generating_stub_(false),
25       has_frame_(false) {
26   if (isolate() != NULL) {
27     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
28                                   isolate());
29   }
30 }
31 
32 
33 void MacroAssembler::Load(Register dst,
34                           const MemOperand& src,
35                           Representation r) {
36   DCHECK(!r.IsDouble());
37   if (r.IsInteger8()) {
38     lb(dst, src);
39   } else if (r.IsUInteger8()) {
40     lbu(dst, src);
41   } else if (r.IsInteger16()) {
42     lh(dst, src);
43   } else if (r.IsUInteger16()) {
44     lhu(dst, src);
45   } else if (r.IsInteger32()) {
46     lw(dst, src);
47   } else {
48     ld(dst, src);
49   }
50 }
51 
52 
53 void MacroAssembler::Store(Register src,
54                            const MemOperand& dst,
55                            Representation r) {
56   DCHECK(!r.IsDouble());
57   if (r.IsInteger8() || r.IsUInteger8()) {
58     sb(src, dst);
59   } else if (r.IsInteger16() || r.IsUInteger16()) {
60     sh(src, dst);
61   } else if (r.IsInteger32()) {
62     sw(src, dst);
63   } else {
64     if (r.IsHeapObject()) {
65       AssertNotSmi(src);
66     } else if (r.IsSmi()) {
67       AssertSmi(src);
68     }
69     sd(src, dst);
70   }
71 }
72 
73 
74 void MacroAssembler::LoadRoot(Register destination,
75                               Heap::RootListIndex index) {
76   ld(destination, MemOperand(s6, index << kPointerSizeLog2));
77 }
78 
79 
80 void MacroAssembler::LoadRoot(Register destination,
81                               Heap::RootListIndex index,
82                               Condition cond,
83                               Register src1, const Operand& src2) {
84   Branch(2, NegateCondition(cond), src1, src2);
85   ld(destination, MemOperand(s6, index << kPointerSizeLog2));
86 }
87 
88 
89 void MacroAssembler::StoreRoot(Register source,
90                                Heap::RootListIndex index) {
91   sd(source, MemOperand(s6, index << kPointerSizeLog2));
92 }
93 
94 
95 void MacroAssembler::StoreRoot(Register source,
96                                Heap::RootListIndex index,
97                                Condition cond,
98                                Register src1, const Operand& src2) {
99   Branch(2, NegateCondition(cond), src1, src2);
100   sd(source, MemOperand(s6, index << kPointerSizeLog2));
101 }
102 
103 
104 // Push and pop all registers that can hold pointers.
105 void MacroAssembler::PushSafepointRegisters() {
106   // Safepoints expect a block of kNumSafepointRegisters values on the
107   // stack, so adjust the stack for unsaved registers.
108   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
109   DCHECK(num_unsaved >= 0);
110   if (num_unsaved > 0) {
111     Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
112   }
113   MultiPush(kSafepointSavedRegisters);
114 }
115 
116 
117 void MacroAssembler::PopSafepointRegisters() {
118   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
119   MultiPop(kSafepointSavedRegisters);
120   if (num_unsaved > 0) {
121     Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
122   }
123 }
124 
125 
126 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
127   sd(src, SafepointRegisterSlot(dst));
128 }
129 
130 
131 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
132   ld(dst, SafepointRegisterSlot(src));
133 }
134 
135 
136 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
137   // The registers are pushed starting with the highest encoding,
138   // which means that lowest encodings are closest to the stack pointer.
139   return kSafepointRegisterStackIndexMap[reg_code];
140 }
141 
142 
143 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
144   return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
145 }
146 
147 
148 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
149   UNIMPLEMENTED_MIPS();
150   // General purpose registers are pushed last on the stack.
151   int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
152   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
153   return MemOperand(sp, doubles_size + register_offset);
154 }
155 
156 
157 void MacroAssembler::InNewSpace(Register object,
158                                 Register scratch,
159                                 Condition cc,
160                                 Label* branch) {
161   DCHECK(cc == eq || cc == ne);
162   And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
163   Branch(branch, cc, scratch,
164          Operand(ExternalReference::new_space_start(isolate())));
165 }
166 
167 
168 void MacroAssembler::RecordWriteField(
169     Register object,
170     int offset,
171     Register value,
172     Register dst,
173     RAStatus ra_status,
174     SaveFPRegsMode save_fp,
175     RememberedSetAction remembered_set_action,
176     SmiCheck smi_check,
177     PointersToHereCheck pointers_to_here_check_for_value) {
178   DCHECK(!AreAliased(value, dst, t8, object));
179   // First, check if a write barrier is even needed. The tests below
180   // catch stores of Smis.
181   Label done;
182 
183   // Skip barrier if writing a smi.
184   if (smi_check == INLINE_SMI_CHECK) {
185     JumpIfSmi(value, &done);
186   }
187 
188   // Although the object register is tagged, the offset is relative to the start
189   // of the object, so the offset must be a multiple of kPointerSize.
190   DCHECK(IsAligned(offset, kPointerSize));
191 
192   Daddu(dst, object, Operand(offset - kHeapObjectTag));
193   if (emit_debug_code()) {
194     Label ok;
195     And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
196     Branch(&ok, eq, t8, Operand(zero_reg));
197     stop("Unaligned cell in write barrier");
198     bind(&ok);
199   }
200 
201   RecordWrite(object,
202               dst,
203               value,
204               ra_status,
205               save_fp,
206               remembered_set_action,
207               OMIT_SMI_CHECK,
208               pointers_to_here_check_for_value);
209 
210   bind(&done);
211 
212   // Clobber clobbered input registers when running with the debug-code flag
213   // turned on to provoke errors.
214   if (emit_debug_code()) {
215     li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
216     li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
217   }
218 }
219 
220 
221 // Will clobber 4 registers: object, map, dst, at.  The
222 // register 'object' contains a heap object pointer.
223 void MacroAssembler::RecordWriteForMap(Register object,
224                                        Register map,
225                                        Register dst,
226                                        RAStatus ra_status,
227                                        SaveFPRegsMode fp_mode) {
228   if (emit_debug_code()) {
229     DCHECK(!dst.is(at));
230     ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
231     Check(eq,
232           kWrongAddressOrValuePassedToRecordWrite,
233           dst,
234           Operand(isolate()->factory()->meta_map()));
235   }
236 
237   if (!FLAG_incremental_marking) {
238     return;
239   }
240 
241   if (emit_debug_code()) {
242     ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
243     Check(eq,
244           kWrongAddressOrValuePassedToRecordWrite,
245           map,
246           Operand(at));
247   }
248 
249   Label done;
250 
251   // A single check of the map's page's interesting flag suffices, since it is
252   // only set during incremental collection, and then it's also guaranteed that
253   // the from object's page's interesting flag is also set.  This optimization
254   // relies on the fact that maps can never be in new space.
255   CheckPageFlag(map,
256                 map,  // Used as scratch.
257                 MemoryChunk::kPointersToHereAreInterestingMask,
258                 eq,
259                 &done);
260 
261   Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
262   if (emit_debug_code()) {
263     Label ok;
264     And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
265     Branch(&ok, eq, at, Operand(zero_reg));
266     stop("Unaligned cell in write barrier");
267     bind(&ok);
268   }
269 
270   // Record the actual write.
271   if (ra_status == kRAHasNotBeenSaved) {
272     push(ra);
273   }
274   RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
275                        fp_mode);
276   CallStub(&stub);
277   if (ra_status == kRAHasNotBeenSaved) {
278     pop(ra);
279   }
280 
281   bind(&done);
282 
283   // Count number of write barriers in generated code.
284   isolate()->counters()->write_barriers_static()->Increment();
285   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
286 
287   // Clobber clobbered registers when running with the debug-code flag
288   // turned on to provoke errors.
289   if (emit_debug_code()) {
290     li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
291     li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
292   }
293 }
294 
295 
296 // Will clobber 4 registers: object, address, value, at.  The
297 // register 'object' contains a heap object pointer.  The heap object
298 // tag is shifted away.
299 void MacroAssembler::RecordWrite(
300     Register object,
301     Register address,
302     Register value,
303     RAStatus ra_status,
304     SaveFPRegsMode fp_mode,
305     RememberedSetAction remembered_set_action,
306     SmiCheck smi_check,
307     PointersToHereCheck pointers_to_here_check_for_value) {
308   DCHECK(!AreAliased(object, address, value, t8));
309   DCHECK(!AreAliased(object, address, value, t9));
310 
311   if (emit_debug_code()) {
312     ld(at, MemOperand(address));
313     Assert(
314         eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
315   }
316 
317   if (remembered_set_action == OMIT_REMEMBERED_SET &&
318       !FLAG_incremental_marking) {
319     return;
320   }
321 
322   // First, check if a write barrier is even needed. The tests below
323   // catch stores of smis and stores into the young generation.
324   Label done;
325 
326   if (smi_check == INLINE_SMI_CHECK) {
327     DCHECK_EQ(0, kSmiTag);
328     JumpIfSmi(value, &done);
329   }
330 
331   if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
332     CheckPageFlag(value,
333                   value,  // Used as scratch.
334                   MemoryChunk::kPointersToHereAreInterestingMask,
335                   eq,
336                   &done);
337   }
338   CheckPageFlag(object,
339                 value,  // Used as scratch.
340                 MemoryChunk::kPointersFromHereAreInterestingMask,
341                 eq,
342                 &done);
343 
344   // Record the actual write.
345   if (ra_status == kRAHasNotBeenSaved) {
346     push(ra);
347   }
348   RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
349                        fp_mode);
350   CallStub(&stub);
351   if (ra_status == kRAHasNotBeenSaved) {
352     pop(ra);
353   }
354 
355   bind(&done);
356 
357   // Count number of write barriers in generated code.
358   isolate()->counters()->write_barriers_static()->Increment();
359   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
360                    value);
361 
362   // Clobber clobbered registers when running with the debug-code flag
363   // turned on to provoke errors.
364   if (emit_debug_code()) {
365     li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
366     li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
367   }
368 }
369 
370 
371 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
372                                          Register address,
373                                          Register scratch,
374                                          SaveFPRegsMode fp_mode,
375                                          RememberedSetFinalAction and_then) {
376   Label done;
377   if (emit_debug_code()) {
378     Label ok;
379     JumpIfNotInNewSpace(object, scratch, &ok);
380     stop("Remembered set pointer is in new space");
381     bind(&ok);
382   }
383   // Load store buffer top.
384   ExternalReference store_buffer =
385       ExternalReference::store_buffer_top(isolate());
386   li(t8, Operand(store_buffer));
387   ld(scratch, MemOperand(t8));
388   // Store pointer to buffer and increment buffer top.
389   sd(address, MemOperand(scratch));
390   Daddu(scratch, scratch, kPointerSize);
391   // Write back new top of buffer.
392   sd(scratch, MemOperand(t8));
393   // Check for the end of the buffer; if it has been reached, fall through
394   // below to call the store buffer overflow stub.
395   And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
396   DCHECK(!scratch.is(t8));
397   if (and_then == kFallThroughAtEnd) {
398     Branch(&done, eq, t8, Operand(zero_reg));
399   } else {
400     DCHECK(and_then == kReturnAtEnd);
401     Ret(eq, t8, Operand(zero_reg));
402   }
403   push(ra);
404   StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
405   CallStub(&store_buffer_overflow);
406   pop(ra);
407   bind(&done);
408   if (and_then == kReturnAtEnd) {
409     Ret();
410   }
411 }
412 
413 
414 // -----------------------------------------------------------------------------
415 // Allocation support.
416 
417 
418 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
419                                             Register scratch,
420                                             Label* miss) {
421   Label same_contexts;
422 
423   DCHECK(!holder_reg.is(scratch));
424   DCHECK(!holder_reg.is(at));
425   DCHECK(!scratch.is(at));
426 
427   // Load current lexical context from the stack frame.
428   ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
429   // In debug mode, make sure the lexical context is set.
430 #ifdef DEBUG
431   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
432       scratch, Operand(zero_reg));
433 #endif
434 
435   // Load the native context of the current context.
436   int offset =
437       Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
438   ld(scratch, FieldMemOperand(scratch, offset));
439   ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
440 
441   // Check the context is a native context.
442   if (emit_debug_code()) {
443     push(holder_reg);  // Temporarily save holder on the stack.
444     // Read the first word and compare to the native_context_map.
445     ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
446     LoadRoot(at, Heap::kNativeContextMapRootIndex);
447     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
448           holder_reg, Operand(at));
449     pop(holder_reg);  // Restore holder.
450   }
451 
452   // Check if both contexts are the same.
453   ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
454   Branch(&same_contexts, eq, scratch, Operand(at));
455 
456   // Check the context is a native context.
457   if (emit_debug_code()) {
458     push(holder_reg);  // Temporarily save holder on the stack.
459     mov(holder_reg, at);  // Move at to its holding place.
460     LoadRoot(at, Heap::kNullValueRootIndex);
461     Check(ne, kJSGlobalProxyContextShouldNotBeNull,
462           holder_reg, Operand(at));
463 
464     ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
465     LoadRoot(at, Heap::kNativeContextMapRootIndex);
466     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
467           holder_reg, Operand(at));
468     // Restore at is not needed. at is reloaded below.
469     pop(holder_reg);  // Restore holder.
470     // Restore at to holder's context.
471     ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
472   }
473 
474   // Check that the security token in the calling global object is
475   // compatible with the security token in the receiving global
476   // object.
477   int token_offset = Context::kHeaderSize +
478                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
479 
480   ld(scratch, FieldMemOperand(scratch, token_offset));
481   ld(at, FieldMemOperand(at, token_offset));
482   Branch(miss, ne, scratch, Operand(at));
483 
484   bind(&same_contexts);
485 }
486 
487 
488 // Compute the hash code from the untagged key.  This must be kept in sync with
489 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
490 // code-stub-hydrogen.cc
491 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
492   // First of all we assign the hash seed to scratch.
493   LoadRoot(scratch, Heap::kHashSeedRootIndex);
494   SmiUntag(scratch);
495 
496   // Xor original key with a seed.
497   xor_(reg0, reg0, scratch);
498 
499   // Compute the hash code from the untagged key.  This must be kept in sync
500   // with ComputeIntegerHash in utils.h.
501   //
502   // hash = ~hash + (hash << 15);
503   // The algorithm uses 32-bit integer values.
504   nor(scratch, reg0, zero_reg);
505   sll(at, reg0, 15);
506   addu(reg0, scratch, at);
507 
508   // hash = hash ^ (hash >> 12);
509   srl(at, reg0, 12);
510   xor_(reg0, reg0, at);
511 
512   // hash = hash + (hash << 2);
513   sll(at, reg0, 2);
514   addu(reg0, reg0, at);
515 
516   // hash = hash ^ (hash >> 4);
517   srl(at, reg0, 4);
518   xor_(reg0, reg0, at);
519 
520   // hash = hash * 2057;
521   sll(scratch, reg0, 11);
522   sll(at, reg0, 3);
523   addu(reg0, reg0, at);
524   addu(reg0, reg0, scratch);
525 
526   // hash = hash ^ (hash >> 16);
527   srl(at, reg0, 16);
528   xor_(reg0, reg0, at);
529 }
530 
531 
532 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
533                                               Register elements,
534                                               Register key,
535                                               Register result,
536                                               Register reg0,
537                                               Register reg1,
538                                               Register reg2) {
539   // Register use:
540   //
541   // elements - holds the slow-case elements of the receiver on entry.
542   //            Unchanged unless 'result' is the same register.
543   //
544   // key      - holds the smi key on entry.
545   //            Unchanged unless 'result' is the same register.
546   //
547   //
548   // result   - holds the result on exit if the load succeeded.
549   //            Allowed to be the same as 'key' or 'result'.
550   //            Unchanged on bailout so 'key' or 'result' can be used
551   //            in further computation.
552   //
553   // Scratch registers:
554   //
555   // reg0 - holds the untagged key on entry and holds the hash once computed.
556   //
557   // reg1 - Used to hold the capacity mask of the dictionary.
558   //
559   // reg2 - Used for the index into the dictionary.
560   // at   - Temporary (avoid MacroAssembler instructions also using 'at').
561   Label done;
562 
563   GetNumberHash(reg0, reg1);
564 
565   // Compute the capacity mask.
566   ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
567   SmiUntag(reg1, reg1);
568   Dsubu(reg1, reg1, Operand(1));
569 
570   // Generate an unrolled loop that performs a few probes before giving up.
571   for (int i = 0; i < kNumberDictionaryProbes; i++) {
572     // Use reg2 for index calculations and keep the hash intact in reg0.
573     mov(reg2, reg0);
574     // Compute the masked index: (hash + i + i * i) & mask.
575     if (i > 0) {
576       Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
577     }
578     and_(reg2, reg2, reg1);
579 
580     // Scale the index by multiplying by the element size.
581     DCHECK(SeededNumberDictionary::kEntrySize == 3);
582     dsll(at, reg2, 1);  // 2x.
583     daddu(reg2, reg2, at);  // reg2 = reg2 * 3.
584 
585     // Check if the key is identical to the name.
586     dsll(at, reg2, kPointerSizeLog2);
587     daddu(reg2, elements, at);
588 
589     ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
590     if (i != kNumberDictionaryProbes - 1) {
591       Branch(&done, eq, key, Operand(at));
592     } else {
593       Branch(miss, ne, key, Operand(at));
594     }
595   }
596 
597   bind(&done);
598   // Check that the value is a normal property.
599   // reg2: elements + (index * kPointerSize).
600   const int kDetailsOffset =
601       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
602   ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
603   And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
604   Branch(miss, ne, at, Operand(zero_reg));
605 
606   // Get the value at the masked, scaled index and return.
607   const int kValueOffset =
608       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
609   ld(result, FieldMemOperand(reg2, kValueOffset));
610 }
611 
612 
613 // ---------------------------------------------------------------------------
614 // Instruction macros.
615 
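// Note: the macros below accept an Operand 'rt' that is either a register or
// an immediate. When the immediate fits the instruction's 16-bit field and
// needs no relocation, the immediate form is emitted directly; otherwise the
// value is first materialized into 'at' with li(), which is why callers must
// not pass 'at' as a source register (see the DCHECKs below).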
616 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
617   if (rt.is_reg()) {
618     addu(rd, rs, rt.rm());
619   } else {
620     if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
621       addiu(rd, rs, rt.imm64_);
622     } else {
623       // li handles the relocation.
624       DCHECK(!rs.is(at));
625       li(at, rt);
626       addu(rd, rs, at);
627     }
628   }
629 }
630 
631 
632 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
633   if (rt.is_reg()) {
634     daddu(rd, rs, rt.rm());
635   } else {
636     if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
637       daddiu(rd, rs, rt.imm64_);
638     } else {
639       // li handles the relocation.
640       DCHECK(!rs.is(at));
641       li(at, rt);
642       daddu(rd, rs, at);
643     }
644   }
645 }
646 
647 
648 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
649   if (rt.is_reg()) {
650     subu(rd, rs, rt.rm());
651   } else {
652     if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
653       addiu(rd, rs, -rt.imm64_);  // No subiu instr, use addiu(x, y, -imm).
654     } else {
655       // li handles the relocation.
656       DCHECK(!rs.is(at));
657       li(at, rt);
658       subu(rd, rs, at);
659     }
660   }
661 }
662 
663 
664 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
665   if (rt.is_reg()) {
666     dsubu(rd, rs, rt.rm());
667   } else {
668     if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
669       daddiu(rd, rs, -rt.imm64_);  // No dsubiu instr, use daddiu(x, y, -imm).
670     } else {
671       // li handles the relocation.
672       DCHECK(!rs.is(at));
673       li(at, rt);
674       dsubu(rd, rs, at);
675     }
676   }
677 }
678 
679 
680 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
681   if (rt.is_reg()) {
682     mul(rd, rs, rt.rm());
683   } else {
684     // li handles the relocation.
685     DCHECK(!rs.is(at));
686     li(at, rt);
687     mul(rd, rs, at);
688   }
689 }
690 
691 
692 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
693   if (rt.is_reg()) {
694     if (kArchVariant != kMips64r6) {
695       mult(rs, rt.rm());
696       mfhi(rd);
697     } else {
698       muh(rd, rs, rt.rm());
699     }
700   } else {
701     // li handles the relocation.
702     DCHECK(!rs.is(at));
703     li(at, rt);
704     if (kArchVariant != kMips64r6) {
705       mult(rs, at);
706       mfhi(rd);
707     } else {
708       muh(rd, rs, at);
709     }
710   }
711 }
712 
713 
714 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
715   if (rt.is_reg()) {
716     if (kArchVariant == kMips64r6) {
717       dmul(rd, rs, rt.rm());
718     } else {
719       dmult(rs, rt.rm());
720       mflo(rd);
721     }
722   } else {
723     // li handles the relocation.
724     DCHECK(!rs.is(at));
725     li(at, rt);
726     if (kArchVariant == kMips64r6) {
727       dmul(rd, rs, at);
728     } else {
729       dmult(rs, at);
730       mflo(rd);
731     }
732   }
733 }
734 
735 
736 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
737   if (rt.is_reg()) {
738     if (kArchVariant == kMips64r6) {
739       dmuh(rd, rs, rt.rm());
740     } else {
741       dmult(rs, rt.rm());
742       mfhi(rd);
743     }
744   } else {
745     // li handles the relocation.
746     DCHECK(!rs.is(at));
747     li(at, rt);
748     if (kArchVariant == kMips64r6) {
749       dmuh(rd, rs, at);
750     } else {
751       dmult(rs, at);
752       mfhi(rd);
753     }
754   }
755 }
756 
757 
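// The Mult/Multu/Dmult/Dmultu and two-operand Div/Ddiv/Divu/Ddivu macros below
// use the pre-r6 encodings, which leave their results in the HI/LO registers;
// callers retrieve them with mfhi/mflo.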
758 void MacroAssembler::Mult(Register rs, const Operand& rt) {
759   if (rt.is_reg()) {
760     mult(rs, rt.rm());
761   } else {
762     // li handles the relocation.
763     DCHECK(!rs.is(at));
764     li(at, rt);
765     mult(rs, at);
766   }
767 }
768 
769 
770 void MacroAssembler::Dmult(Register rs, const Operand& rt) {
771   if (rt.is_reg()) {
772     dmult(rs, rt.rm());
773   } else {
774     // li handles the relocation.
775     DCHECK(!rs.is(at));
776     li(at, rt);
777     dmult(rs, at);
778   }
779 }
780 
781 
782 void MacroAssembler::Multu(Register rs, const Operand& rt) {
783   if (rt.is_reg()) {
784     multu(rs, rt.rm());
785   } else {
786     // li handles the relocation.
787     DCHECK(!rs.is(at));
788     li(at, rt);
789     multu(rs, at);
790   }
791 }
792 
793 
794 void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
795   if (rt.is_reg()) {
796     dmultu(rs, rt.rm());
797   } else {
798     // li handles the relocation.
799     DCHECK(!rs.is(at));
800     li(at, rt);
801     dmultu(rs, at);
802   }
803 }
804 
805 
806 void MacroAssembler::Div(Register rs, const Operand& rt) {
807   if (rt.is_reg()) {
808     div(rs, rt.rm());
809   } else {
810     // li handles the relocation.
811     DCHECK(!rs.is(at));
812     li(at, rt);
813     div(rs, at);
814   }
815 }
816 
817 
818 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
819   if (rt.is_reg()) {
820     ddiv(rs, rt.rm());
821   } else {
822     // li handles the relocation.
823     DCHECK(!rs.is(at));
824     li(at, rt);
825     ddiv(rs, at);
826   }
827 }
828 
829 
830 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
831   if (kArchVariant != kMips64r6) {
832     if (rt.is_reg()) {
833       ddiv(rs, rt.rm());
834       mflo(rd);
835     } else {
836       // li handles the relocation.
837       DCHECK(!rs.is(at));
838       li(at, rt);
839       ddiv(rs, at);
840       mflo(rd);
841     }
842   } else {
843     if (rt.is_reg()) {
844       ddiv(rd, rs, rt.rm());
845     } else {
846       // li handles the relocation.
847       DCHECK(!rs.is(at));
848       li(at, rt);
849       ddiv(rd, rs, at);
850     }
851   }
852 }
853 
854 
855 void MacroAssembler::Divu(Register rs, const Operand& rt) {
856   if (rt.is_reg()) {
857     divu(rs, rt.rm());
858   } else {
859     // li handles the relocation.
860     DCHECK(!rs.is(at));
861     li(at, rt);
862     divu(rs, at);
863   }
864 }
865 
866 
867 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
868   if (rt.is_reg()) {
869     ddivu(rs, rt.rm());
870   } else {
871     // li handles the relocation.
872     DCHECK(!rs.is(at));
873     li(at, rt);
874     ddivu(rs, at);
875   }
876 }
877 
878 
879 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
880   if (kArchVariant != kMips64r6) {
881     if (rt.is_reg()) {
882       ddiv(rs, rt.rm());
883       mfhi(rd);
884     } else {
885       // li handles the relocation.
886       DCHECK(!rs.is(at));
887       li(at, rt);
888       ddiv(rs, at);
889       mfhi(rd);
890     }
891   } else {
892     if (rt.is_reg()) {
893       dmod(rd, rs, rt.rm());
894     } else {
895       // li handles the relocation.
896       DCHECK(!rs.is(at));
897       li(at, rt);
898       dmod(rd, rs, at);
899     }
900   }
901 }
902 
903 
904 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
905   if (rt.is_reg()) {
906     and_(rd, rs, rt.rm());
907   } else {
908     if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
909       andi(rd, rs, rt.imm64_);
910     } else {
911       // li handles the relocation.
912       DCHECK(!rs.is(at));
913       li(at, rt);
914       and_(rd, rs, at);
915     }
916   }
917 }
918 
919 
920 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
921   if (rt.is_reg()) {
922     or_(rd, rs, rt.rm());
923   } else {
924     if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
925       ori(rd, rs, rt.imm64_);
926     } else {
927       // li handles the relocation.
928       DCHECK(!rs.is(at));
929       li(at, rt);
930       or_(rd, rs, at);
931     }
932   }
933 }
934 
935 
936 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
937   if (rt.is_reg()) {
938     xor_(rd, rs, rt.rm());
939   } else {
940     if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
941       xori(rd, rs, rt.imm64_);
942     } else {
943       // li handles the relocation.
944       DCHECK(!rs.is(at));
945       li(at, rt);
946       xor_(rd, rs, at);
947     }
948   }
949 }
950 
951 
952 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
953   if (rt.is_reg()) {
954     nor(rd, rs, rt.rm());
955   } else {
956     // li handles the relocation.
957     DCHECK(!rs.is(at));
958     li(at, rt);
959     nor(rd, rs, at);
960   }
961 }
962 
963 
964 void MacroAssembler::Neg(Register rs, const Operand& rt) {
965   DCHECK(rt.is_reg());
966   DCHECK(!at.is(rs));
967   DCHECK(!at.is(rt.rm()));
968   li(at, -1);
969   xor_(rs, rt.rm(), at);
970 }
971 
972 
973 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
974   if (rt.is_reg()) {
975     slt(rd, rs, rt.rm());
976   } else {
977     if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
978       slti(rd, rs, rt.imm64_);
979     } else {
980       // li handles the relocation.
981       DCHECK(!rs.is(at));
982       li(at, rt);
983       slt(rd, rs, at);
984     }
985   }
986 }
987 
988 
989 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
990   if (rt.is_reg()) {
991     sltu(rd, rs, rt.rm());
992   } else {
993     if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
994       sltiu(rd, rs, rt.imm64_);
995     } else {
996       // li handles the relocation.
997       DCHECK(!rs.is(at));
998       li(at, rt);
999       sltu(rd, rs, at);
1000     }
1001   }
1002 }
1003 
1004 
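// Rotate right. On r2 the rotr/rotrv instructions are used directly; otherwise
// the rotation is synthesized from two shifts and an or:
//   rd = (rs >> n) | (rs << (32 - n)), with n taken modulo 32.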
1005 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1006   if (kArchVariant == kMips64r2) {
1007     if (rt.is_reg()) {
1008       rotrv(rd, rs, rt.rm());
1009     } else {
1010       rotr(rd, rs, rt.imm64_);
1011     }
1012   } else {
1013     if (rt.is_reg()) {
1014       subu(at, zero_reg, rt.rm());
1015       sllv(at, rs, at);
1016       srlv(rd, rs, rt.rm());
1017       or_(rd, rd, at);
1018     } else {
1019       if (rt.imm64_ == 0) {
1020         srl(rd, rs, 0);
1021       } else {
1022         srl(at, rs, rt.imm64_);
1023         sll(rd, rs, (0x20 - rt.imm64_) & 0x1f);
1024         or_(rd, rd, at);
1025       }
1026     }
1027   }
1028 }
1029 
1030 
1031 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1032   if (rt.is_reg()) {
1033     drotrv(rd, rs, rt.rm());
1034   } else {
1035     drotr(rd, rs, rt.imm64_);
1036   }
1037 }
1038 
1039 
1040 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1041     pref(hint, rs);
1042 }
1043 
1044 
1045 // ------------Pseudo-instructions-------------
1046 
1047 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1048   lwr(rd, rs);
1049   lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1050 }
1051 
1052 
1053 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1054   swr(rd, rs);
1055   swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1056 }
1057 
1058 
1059 // Do a 64-bit load from an unaligned address. Note this only handles the
1060 // specific case of an address that is 32-bit aligned but not 64-bit aligned.
1061 void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
1062   // Assert that the offset from the start of the object is NOT 64-bit aligned.
1063   // ONLY use with known misalignment, since there is a performance cost.
1064   DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1065   // TODO(plind): endian dependency.
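  // The low word is loaded zero-extended (lwu) and the high word sign-extended
  // (lw); shift the high half into place and combine the two 32-bit halves.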
1066   lwu(rd, rs);
1067   lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1068   dsll32(scratch, scratch, 0);
1069   Daddu(rd, rd, scratch);
1070 }
1071 
1072 
1073 // Do a 64-bit store to an unaligned address. Note this only handles the
1074 // specific case of an address that is 32-bit aligned but not 64-bit aligned.
1075 void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
1076   // Assert that the offset from the start of the object is NOT 64-bit aligned.
1077   // ONLY use with known misalignment, since there is a performance cost.
1078   DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1079   // TODO(plind): endian dependency.
1080   sw(rd, rs);
1081   dsrl32(scratch, rd, 0);
1082   sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1083 }
1084 
1085 
1086 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1087   AllowDeferredHandleDereference smi_check;
1088   if (value->IsSmi()) {
1089     li(dst, Operand(value), mode);
1090   } else {
1091     DCHECK(value->IsHeapObject());
1092     if (isolate()->heap()->InNewSpace(*value)) {
1093       Handle<Cell> cell = isolate()->factory()->NewCell(value);
1094       li(dst, Operand(cell));
1095       ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
1096     } else {
1097       li(dst, Operand(value));
1098     }
1099   }
1100 }
1101 
1102 
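// Loads a 64-bit immediate or relocatable value. When no relocation is needed
// and OPTIMIZE_SIZE is allowed, the shortest lui/ori/daddiu sequence for the
// value is chosen; relocated values and ADDRESS_LOAD always emit a fixed-length
// sequence so the load can later be patched with a different value.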
1103 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1104   DCHECK(!j.is_reg());
1105   BlockTrampolinePoolScope block_trampoline_pool(this);
1106   if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1107     // Normal load of an immediate value which does not need Relocation Info.
1108     if (is_int32(j.imm64_)) {
1109       if (is_int16(j.imm64_)) {
1110         daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1111       } else if (!(j.imm64_ & kHiMask)) {
1112         ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1113       } else if (!(j.imm64_ & kImm16Mask)) {
1114         lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1115       } else {
1116         lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1117         ori(rd, rd, (j.imm64_ & kImm16Mask));
1118       }
1119     } else {
1120       lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1121       ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1122       dsll(rd, rd, 16);
1123       ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1124       dsll(rd, rd, 16);
1125       ori(rd, rd, j.imm64_ & kImm16Mask);
1126     }
1127   } else if (MustUseReg(j.rmode_)) {
1128     RecordRelocInfo(j.rmode_, j.imm64_);
1129     lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1130     ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1131     dsll(rd, rd, 16);
1132     ori(rd, rd, j.imm64_ & kImm16Mask);
1133   } else if (mode == ADDRESS_LOAD)  {
1134     // We always emit the same number of instructions, since this code may be
1135     // patched later to load another value which may need all 4 instructions.
1136     lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1137     ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1138     dsll(rd, rd, 16);
1139     ori(rd, rd, j.imm64_ & kImm16Mask);
1140   } else {
1141     lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1142     ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1143     dsll(rd, rd, 16);
1144     ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1145     dsll(rd, rd, 16);
1146     ori(rd, rd, j.imm64_ & kImm16Mask);
1147   }
1148 }
1149 
1150 
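// MultiPush stores the registers selected in 'regs' in one contiguous block
// below the old stack pointer. It iterates from the highest register code
// down, so the register with the highest code ends up at the highest address,
// matching the layout described in SafepointRegisterStackIndex() above.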
1151 void MacroAssembler::MultiPush(RegList regs) {
1152   int16_t num_to_push = NumberOfBitsSet(regs);
1153   int16_t stack_offset = num_to_push * kPointerSize;
1154 
1155   Dsubu(sp, sp, Operand(stack_offset));
1156   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1157     if ((regs & (1 << i)) != 0) {
1158       stack_offset -= kPointerSize;
1159       sd(ToRegister(i), MemOperand(sp, stack_offset));
1160     }
1161   }
1162 }
1163 
1164 
1165 void MacroAssembler::MultiPushReversed(RegList regs) {
1166   int16_t num_to_push = NumberOfBitsSet(regs);
1167   int16_t stack_offset = num_to_push * kPointerSize;
1168 
1169   Dsubu(sp, sp, Operand(stack_offset));
1170   for (int16_t i = 0; i < kNumRegisters; i++) {
1171     if ((regs & (1 << i)) != 0) {
1172       stack_offset -= kPointerSize;
1173       sd(ToRegister(i), MemOperand(sp, stack_offset));
1174     }
1175   }
1176 }
1177 
1178 
1179 void MacroAssembler::MultiPop(RegList regs) {
1180   int16_t stack_offset = 0;
1181 
1182   for (int16_t i = 0; i < kNumRegisters; i++) {
1183     if ((regs & (1 << i)) != 0) {
1184       ld(ToRegister(i), MemOperand(sp, stack_offset));
1185       stack_offset += kPointerSize;
1186     }
1187   }
1188   daddiu(sp, sp, stack_offset);
1189 }
1190 
1191 
1192 void MacroAssembler::MultiPopReversed(RegList regs) {
1193   int16_t stack_offset = 0;
1194 
1195   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1196     if ((regs & (1 << i)) != 0) {
1197       ld(ToRegister(i), MemOperand(sp, stack_offset));
1198       stack_offset += kPointerSize;
1199     }
1200   }
1201   daddiu(sp, sp, stack_offset);
1202 }
1203 
1204 
1205 void MacroAssembler::MultiPushFPU(RegList regs) {
1206   int16_t num_to_push = NumberOfBitsSet(regs);
1207   int16_t stack_offset = num_to_push * kDoubleSize;
1208 
1209   Dsubu(sp, sp, Operand(stack_offset));
1210   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1211     if ((regs & (1 << i)) != 0) {
1212       stack_offset -= kDoubleSize;
1213       sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1214     }
1215   }
1216 }
1217 
1218 
1219 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1220   int16_t num_to_push = NumberOfBitsSet(regs);
1221   int16_t stack_offset = num_to_push * kDoubleSize;
1222 
1223   Dsubu(sp, sp, Operand(stack_offset));
1224   for (int16_t i = 0; i < kNumRegisters; i++) {
1225     if ((regs & (1 << i)) != 0) {
1226       stack_offset -= kDoubleSize;
1227       sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1228     }
1229   }
1230 }
1231 
1232 
1233 void MacroAssembler::MultiPopFPU(RegList regs) {
1234   int16_t stack_offset = 0;
1235 
1236   for (int16_t i = 0; i < kNumRegisters; i++) {
1237     if ((regs & (1 << i)) != 0) {
1238       ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1239       stack_offset += kDoubleSize;
1240     }
1241   }
1242   daddiu(sp, sp, stack_offset);
1243 }
1244 
1245 
1246 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1247   int16_t stack_offset = 0;
1248 
1249   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1250     if ((regs & (1 << i)) != 0) {
1251       ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1252       stack_offset += kDoubleSize;
1253     }
1254   }
1255   daddiu(sp, sp, stack_offset);
1256 }
1257 
1258 
1259 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1260   RegList saved_regs = kJSCallerSaved | ra.bit();
1261   MultiPush(saved_regs);
1262   AllowExternalCallThatCantCauseGC scope(this);
1263 
1264   // Save to a0 in case address == a4.
1265   Move(a0, address);
1266   PrepareCallCFunction(2, a4);
1267 
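  // Second argument for the C call: the number of bytes to flush.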
1268   li(a1, instructions * kInstrSize);
1269   CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1270   MultiPop(saved_regs);
1271 }
1272 
1273 
1274 void MacroAssembler::Ext(Register rt,
1275                          Register rs,
1276                          uint16_t pos,
1277                          uint16_t size) {
1278   DCHECK(pos < 32);
1279   DCHECK(pos + size < 33);
1280   ext_(rt, rs, pos, size);
1281 }
1282 
1283 
1284 void MacroAssembler::Ins(Register rt,
1285                          Register rs,
1286                          uint16_t pos,
1287                          uint16_t size) {
1288   DCHECK(pos < 32);
1289   DCHECK(pos + size <= 32);
1290   DCHECK(size != 0);
1291   ins_(rt, rs, pos, size);
1292 }
1293 
1294 
1295 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1296                               FPURegister fs,
1297                               FPURegister scratch) {
1298   // Move the data from fs to t8.
1299   mfc1(t8, fs);
1300   Cvt_d_uw(fd, t8, scratch);
1301 }
1302 
1303 
1304 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1305                               Register rs,
1306                               FPURegister scratch) {
1307   // Convert rs to an FP value in fd.
1308   // We do this by converting rs minus the MSB to avoid sign conversion,
1309   // then adding 2^31 to the result (if needed).
1310 
1311   DCHECK(!fd.is(scratch));
1312   DCHECK(!rs.is(t9));
1313   DCHECK(!rs.is(at));
1314 
1315   // Save rs's MSB to t9.
1316   Ext(t9, rs, 31, 1);
1317   // Remove rs's MSB.
1318   Ext(at, rs, 0, 31);
1319   // Move the result to fd.
1320   mtc1(at, fd);
1321   mthc1(zero_reg, fd);
1322 
1323   // Convert fd to a real FP value.
1324   cvt_d_w(fd, fd);
1325 
1326   Label conversion_done;
1327 
1328   // If rs's MSB was 0, it's done.
1329   // Otherwise we need to add that to the FP register.
1330   Branch(&conversion_done, eq, t9, Operand(zero_reg));
1331 
1332   // Load 2^31 into scratch as its double representation.
1333   li(at, 0x41E00000);
1334   mtc1(zero_reg, scratch);
1335   mthc1(at, scratch);
1336   // Add it to fd.
1337   add_d(fd, fd, scratch);
1338 
1339   bind(&conversion_done);
1340 }
1341 
1342 
1343 void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
1344   round_l_d(fd, fs);
1345 }
1346 
1347 
1348 void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
1349   floor_l_d(fd, fs);
1350 }
1351 
1352 
1353 void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
1354   ceil_l_d(fd, fs);
1355 }
1356 
1357 
1358 void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
1359   trunc_l_d(fd, fs);
1360 }
1361 
1362 
1363 void MacroAssembler::Trunc_l_ud(FPURegister fd,
1364                                 FPURegister fs,
1365                                 FPURegister scratch) {
1366   // Load to GPR.
1367   dmfc1(t8, fs);
1368   // Reset sign bit.
1369   li(at, 0x7fffffffffffffff);
1370   and_(t8, t8, at);
1371   dmtc1(t8, fs);
1372   trunc_l_d(fd, fs);
1373 }
1374 
1375 
1376 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1377                                 FPURegister fs,
1378                                 FPURegister scratch) {
1379   Trunc_uw_d(fs, t8, scratch);
1380   mtc1(t8, fd);
1381 }
1382 
1383 
1384 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1385   trunc_w_d(fd, fs);
1386 }
1387 
1388 
1389 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1390   round_w_d(fd, fs);
1391 }
1392 
1393 
1394 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1395   floor_w_d(fd, fs);
1396 }
1397 
1398 
1399 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1400   ceil_w_d(fd, fs);
1401 }
1402 
1403 
1404 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1405                                 Register rs,
1406                                 FPURegister scratch) {
1407   DCHECK(!fd.is(scratch));
1408   DCHECK(!rs.is(at));
1409 
1410   // Load 2^31 into scratch as its double representation.
1411   li(at, 0x41E00000);
1412   mtc1(zero_reg, scratch);
1413   mthc1(at, scratch);
1414   // Test if scratch > fd.
1415   // If fd < 2^31 we can convert it normally.
1416   Label simple_convert;
1417   BranchF(&simple_convert, NULL, lt, fd, scratch);
1418 
1419   // First we subtract 2^31 from fd, then trunc it to rs
1420   // and add 2^31 to rs.
1421   sub_d(scratch, fd, scratch);
1422   trunc_w_d(scratch, scratch);
1423   mfc1(rs, scratch);
1424   Or(rs, rs, 1 << 31);
1425 
1426   Label done;
1427   Branch(&done);
1428   // Simple conversion.
1429   bind(&simple_convert);
1430   trunc_w_d(scratch, fd);
1431   mfc1(rs, scratch);
1432 
1433   bind(&done);
1434 }
1435 
1436 
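// Madd_d computes fd = fr + (fs * ft). When the fused madd.d instruction is
// not selected, the product is formed in 'scratch' and added separately.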
1437 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
1438     FPURegister ft, FPURegister scratch) {
1439   if (0) {  // TODO(plind): find reasonable arch-variant symbol names.
1440     madd_d(fd, fr, fs, ft);
1441   } else {
1442     // Must not change the source registers' values.
1443     DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
1444     mul_d(scratch, fs, ft);
1445     add_d(fd, fr, scratch);
1446   }
1447 }
1448 
1449 
1450 void MacroAssembler::BranchF(Label* target,
1451                              Label* nan,
1452                              Condition cc,
1453                              FPURegister cmp1,
1454                              FPURegister cmp2,
1455                              BranchDelaySlot bd) {
1456   BlockTrampolinePoolScope block_trampoline_pool(this);
1457   if (cc == al) {
1458     Branch(bd, target);
1459     return;
1460   }
1461 
1462   DCHECK(nan || target);
1463   // Check for unordered (NaN) cases.
1464   if (nan) {
1465     if (kArchVariant != kMips64r6) {
1466       c(UN, D, cmp1, cmp2);
1467       bc1t(nan);
1468     } else {
1469       // Use f31 for the comparison result. It has to be unavailable to the
1470       // lithium register allocator.
1471       DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
1472       cmp(UN, L, f31, cmp1, cmp2);
1473       bc1nez(nan, f31);
1474     }
1475   }
1476 
1477   if (kArchVariant != kMips64r6) {
1478     if (target) {
1479       // Here NaN cases were either handled by this function or are assumed to
1480       // have been handled by the caller.
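      // gt and ge use the complementary unordered compares (ULE/ULT) with
      // bc1f, so an unordered (NaN) result never takes the branch.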
1481       switch (cc) {
1482         case lt:
1483           c(OLT, D, cmp1, cmp2);
1484           bc1t(target);
1485           break;
1486         case gt:
1487           c(ULE, D, cmp1, cmp2);
1488           bc1f(target);
1489           break;
1490         case ge:
1491           c(ULT, D, cmp1, cmp2);
1492           bc1f(target);
1493           break;
1494         case le:
1495           c(OLE, D, cmp1, cmp2);
1496           bc1t(target);
1497           break;
1498         case eq:
1499           c(EQ, D, cmp1, cmp2);
1500           bc1t(target);
1501           break;
1502         case ueq:
1503           c(UEQ, D, cmp1, cmp2);
1504           bc1t(target);
1505           break;
1506         case ne:
1507           c(EQ, D, cmp1, cmp2);
1508           bc1f(target);
1509           break;
1510         case nue:
1511           c(UEQ, D, cmp1, cmp2);
1512           bc1f(target);
1513           break;
1514         default:
1515           CHECK(0);
1516       }
1517     }
1518   } else {
1519     if (target) {
1520       // Here NaN cases were either handled by this function or are assumed to
1521       // have been handled by the caller.
1522       // Unsigned conditions are treated as their signed counterparts.
1523       // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode.
1524       DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
1525       switch (cc) {
1526         case lt:
1527           cmp(OLT, L, f31, cmp1, cmp2);
1528           bc1nez(target, f31);
1529           break;
1530         case gt:
1531           cmp(ULE, L, f31, cmp1, cmp2);
1532           bc1eqz(target, f31);
1533           break;
1534         case ge:
1535           cmp(ULT, L, f31, cmp1, cmp2);
1536           bc1eqz(target, f31);
1537           break;
1538         case le:
1539           cmp(OLE, L, f31, cmp1, cmp2);
1540           bc1nez(target, f31);
1541           break;
1542         case eq:
1543           cmp(EQ, L, f31, cmp1, cmp2);
1544           bc1nez(target, f31);
1545           break;
1546         case ueq:
1547           cmp(UEQ, L, f31, cmp1, cmp2);
1548           bc1nez(target, f31);
1549           break;
1550         case ne:
1551           cmp(EQ, L, f31, cmp1, cmp2);
1552           bc1eqz(target, f31);
1553           break;
1554         case nue:
1555           cmp(UEQ, L, f31, cmp1, cmp2);
1556           bc1eqz(target, f31);
1557           break;
1558         default:
1559           CHECK(0);
1560       }
1561     }
1562   }
1563 
1564   if (bd == PROTECT) {
1565     nop();
1566   }
1567 }
1568 
1569 
1570 void MacroAssembler::Move(FPURegister dst, double imm) {
1571   static const DoubleRepresentation minus_zero(-0.0);
1572   static const DoubleRepresentation zero(0.0);
1573   DoubleRepresentation value_rep(imm);
1574   // Handle special values first.
1575   bool force_load = dst.is(kDoubleRegZero);
1576   if (value_rep == zero && !force_load) {
1577     mov_d(dst, kDoubleRegZero);
1578   } else if (value_rep == minus_zero && !force_load) {
1579     neg_d(dst, kDoubleRegZero);
1580   } else {
1581     uint32_t lo, hi;
1582     DoubleAsTwoUInt32(imm, &lo, &hi);
1583     // Move the low part of the double into the lower bits of the corresponding
1584     // FPU register.
1585     if (lo != 0) {
1586       li(at, Operand(lo));
1587       mtc1(at, dst);
1588     } else {
1589       mtc1(zero_reg, dst);
1590     }
1591     // Move the high part of the double into the high bits of the corresponding
1592     // FPU register.
1593     if (hi != 0) {
1594       li(at, Operand(hi));
1595       mthc1(at, dst);
1596     } else {
1597       mthc1(zero_reg, dst);
1598     }
1599   }
1600 }
1601 
1602 
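// Movz/Movn are conditional moves. mips64r6 dropped the movz/movn encodings,
// so on r6 they are emulated with a short branch around a plain move.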
1603 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1604   if (kArchVariant == kMips64r6) {
1605     Label done;
1606     Branch(&done, ne, rt, Operand(zero_reg));
1607     mov(rd, rs);
1608     bind(&done);
1609   } else {
1610     movz(rd, rs, rt);
1611   }
1612 }
1613 
1614 
1615 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1616   if (kArchVariant == kMips64r6) {
1617     Label done;
1618     Branch(&done, eq, rt, Operand(zero_reg));
1619     mov(rd, rs);
1620     bind(&done);
1621   } else {
1622     movn(rd, rs, rt);
1623   }
1624 }
1625 
1626 
1627 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1628   movt(rd, rs, cc);
1629 }
1630 
1631 
1632 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1633   movf(rd, rs, cc);
1634 }
1635 
1636 
1637 void MacroAssembler::Clz(Register rd, Register rs) {
1638   clz(rd, rs);
1639 }
1640 
1641 
1642 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1643                                      Register result,
1644                                      DoubleRegister double_input,
1645                                      Register scratch,
1646                                      DoubleRegister double_scratch,
1647                                      Register except_flag,
1648                                      CheckForInexactConversion check_inexact) {
1649   DCHECK(!result.is(scratch));
1650   DCHECK(!double_input.is(double_scratch));
1651   DCHECK(!except_flag.is(scratch));
1652 
1653   Label done;
1654 
1655   // Clear the except flag (0 = no exception)
1656   mov(except_flag, zero_reg);
1657 
1658   // Test for values that can be exactly represented as a signed 32-bit integer.
1659   cvt_w_d(double_scratch, double_input);
1660   mfc1(result, double_scratch);
1661   cvt_d_w(double_scratch, double_scratch);
1662   BranchF(&done, NULL, eq, double_input, double_scratch);
1663 
1664   int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
1665 
1666   if (check_inexact == kDontCheckForInexactConversion) {
1667     // Ignore inexact exceptions.
1668     except_mask &= ~kFCSRInexactFlagMask;
1669   }
1670 
1671   // Save FCSR.
1672   cfc1(scratch, FCSR);
1673   // Disable FPU exceptions.
1674   ctc1(zero_reg, FCSR);
1675 
1676   // Do operation based on rounding mode.
1677   switch (rounding_mode) {
1678     case kRoundToNearest:
1679       Round_w_d(double_scratch, double_input);
1680       break;
1681     case kRoundToZero:
1682       Trunc_w_d(double_scratch, double_input);
1683       break;
1684     case kRoundToPlusInf:
1685       Ceil_w_d(double_scratch, double_input);
1686       break;
1687     case kRoundToMinusInf:
1688       Floor_w_d(double_scratch, double_input);
1689       break;
1690   }  // End of switch-statement.
1691 
1692   // Retrieve FCSR.
1693   cfc1(except_flag, FCSR);
1694   // Restore FCSR.
1695   ctc1(scratch, FCSR);
1696   // Move the converted value into the result register.
1697   mfc1(result, double_scratch);
1698 
1699   // Check for fpu exceptions.
1700   And(except_flag, except_flag, Operand(except_mask));
1701 
1702   bind(&done);
1703 }
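// A typical use of EmitFPUTruncate (sketch; the label and registers are
// placeholders): convert with truncation and bail out if any FPU exception
// other than "inexact" was raised.
//   __ EmitFPUTruncate(kRoundToZero, result, input, scratch, double_scratch,
//                      except_flag, kDontCheckForInexactConversion);
//   __ Branch(&conversion_failed, ne, except_flag, Operand(zero_reg));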
1704 
1705 
1706 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1707                                                 DoubleRegister double_input,
1708                                                 Label* done) {
1709   DoubleRegister single_scratch = kLithiumScratchDouble.low();
1710   Register scratch = at;
1711   Register scratch2 = t9;
1712 
1713   // Clear cumulative exception flags and save the FCSR.
1714   cfc1(scratch2, FCSR);
1715   ctc1(zero_reg, FCSR);
1716   // Try a conversion to a signed integer.
1717   trunc_w_d(single_scratch, double_input);
1718   mfc1(result, single_scratch);
1719   // Retrieve and restore the FCSR.
1720   cfc1(scratch, FCSR);
1721   ctc1(scratch2, FCSR);
1722   // Check for overflow and NaNs.
1723   And(scratch,
1724       scratch,
1725       kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1726   // If we had no exceptions we are done.
1727   Branch(done, eq, scratch, Operand(zero_reg));
1728 }
1729 
1730 
1731 void MacroAssembler::TruncateDoubleToI(Register result,
1732                                        DoubleRegister double_input) {
1733   Label done;
1734 
1735   TryInlineTruncateDoubleToI(result, double_input, &done);
1736 
1737   // If we fell through, the inline version didn't succeed; call the stub instead.
1738   push(ra);
1739   Dsubu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
1740   sdc1(double_input, MemOperand(sp, 0));
1741 
1742   DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1743   CallStub(&stub);
1744 
1745   Daddu(sp, sp, Operand(kDoubleSize));
1746   pop(ra);
1747 
1748   bind(&done);
1749 }
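// TruncateDoubleToI first tries the inline trunc_w_d path above; only when
// that path reports an FPU exception (overflow, underflow or invalid
// operation, e.g. for NaN or out-of-range inputs) is the input spilled to the
// stack and the general-purpose DoubleToIStub called instead.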
1750 
1751 
1752 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1753   Label done;
1754   DoubleRegister double_scratch = f12;
1755   DCHECK(!result.is(object));
1756 
1757   ldc1(double_scratch,
1758        MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1759   TryInlineTruncateDoubleToI(result, double_scratch, &done);
1760 
1761   // If we fell through, the inline version didn't succeed; call the stub instead.
1762   push(ra);
1763   DoubleToIStub stub(isolate(),
1764                      object,
1765                      result,
1766                      HeapNumber::kValueOffset - kHeapObjectTag,
1767                      true,
1768                      true);
1769   CallStub(&stub);
1770   pop(ra);
1771 
1772   bind(&done);
1773 }
1774 
1775 
1776 void MacroAssembler::TruncateNumberToI(Register object,
1777                                        Register result,
1778                                        Register heap_number_map,
1779                                        Register scratch,
1780                                        Label* not_number) {
1781   Label done;
1782   DCHECK(!result.is(object));
1783 
1784   UntagAndJumpIfSmi(result, object, &done);
1785   JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1786   TruncateHeapNumberToI(result, object);
1787 
1788   bind(&done);
1789 }
1790 
1791 
1792 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1793                                          Register src,
1794                                          int num_least_bits) {
1795   // Ext(dst, src, kSmiTagSize, num_least_bits);
1796   SmiUntag(dst, src);
1797   And(dst, dst, Operand((1 << num_least_bits) - 1));
1798 }
1799 
1800 
1801 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1802                                            Register src,
1803                                            int num_least_bits) {
1804   DCHECK(!src.is(dst));
1805   And(dst, src, Operand((1 << num_least_bits) - 1));
1806 }
1807 
1808 
1809 // Emulated conditional branches do not emit a nop in the branch delay slot.
1810 //
1811 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1812 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                                \
1813     (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
1814     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
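// For example, Branch(&L, cc_always, zero_reg, Operand(zero_reg)) and
// Branch(&L, eq, a0, Operand(a1)) satisfy the check, whereas an unconditional
// branch that names real registers, or a conditional one comparing zero_reg
// with zero_reg, would trip the DCHECK.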
1815 
1816 
1817 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1818   BranchShort(offset, bdslot);
1819 }
1820 
1821 
1822 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1823                             const Operand& rt,
1824                             BranchDelaySlot bdslot) {
1825   BranchShort(offset, cond, rs, rt, bdslot);
1826 }
1827 
1828 
1829 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1830   if (L->is_bound()) {
1831     if (is_near(L)) {
1832       BranchShort(L, bdslot);
1833     } else {
1834       Jr(L, bdslot);
1835     }
1836   } else {
1837     if (is_trampoline_emitted()) {
1838       Jr(L, bdslot);
1839     } else {
1840       BranchShort(L, bdslot);
1841     }
1842   }
1843 }
1844 
1845 
1846 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1847                             const Operand& rt,
1848                             BranchDelaySlot bdslot) {
1849   if (L->is_bound()) {
1850     if (is_near(L)) {
1851       BranchShort(L, cond, rs, rt, bdslot);
1852     } else {
1853       if (cond != cc_always) {
1854         Label skip;
1855         Condition neg_cond = NegateCondition(cond);
1856         BranchShort(&skip, neg_cond, rs, rt);
1857         Jr(L, bdslot);
1858         bind(&skip);
1859       } else {
1860         Jr(L, bdslot);
1861       }
1862     }
1863   } else {
1864     if (is_trampoline_emitted()) {
1865       if (cond != cc_always) {
1866         Label skip;
1867         Condition neg_cond = NegateCondition(cond);
1868         BranchShort(&skip, neg_cond, rs, rt);
1869         Jr(L, bdslot);
1870         bind(&skip);
1871       } else {
1872         Jr(L, bdslot);
1873       }
1874     } else {
1875       BranchShort(L, cond, rs, rt, bdslot);
1876     }
1877   }
1878 }
1879 
1880 
1881 void MacroAssembler::Branch(Label* L,
1882                             Condition cond,
1883                             Register rs,
1884                             Heap::RootListIndex index,
1885                             BranchDelaySlot bdslot) {
1886   LoadRoot(at, index);
1887   Branch(L, cond, rs, Operand(at), bdslot);
1888 }
1889 
1890 
1891 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1892   b(offset);
1893 
1894   // Emit a nop in the branch delay slot if required.
1895   if (bdslot == PROTECT)
1896     nop();
1897 }
1898 
1899 
1900 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1901                                  const Operand& rt,
1902                                  BranchDelaySlot bdslot) {
1903   BRANCH_ARGS_CHECK(cond, rs, rt);
1904   DCHECK(!rs.is(zero_reg));
1905   Register r2 = no_reg;
1906   Register scratch = at;
1907 
1908   if (rt.is_reg()) {
1909     // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1910     // rt.
1911     BlockTrampolinePoolScope block_trampoline_pool(this);
1912     r2 = rt.rm_;
1913     switch (cond) {
1914       case cc_always:
1915         b(offset);
1916         break;
1917       case eq:
1918         beq(rs, r2, offset);
1919         break;
1920       case ne:
1921         bne(rs, r2, offset);
1922         break;
1923       // Signed comparison.
1924       case greater:
1925         if (r2.is(zero_reg)) {
1926           bgtz(rs, offset);
1927         } else {
1928           slt(scratch, r2, rs);
1929           bne(scratch, zero_reg, offset);
1930         }
1931         break;
1932       case greater_equal:
1933         if (r2.is(zero_reg)) {
1934           bgez(rs, offset);
1935         } else {
1936           slt(scratch, rs, r2);
1937           beq(scratch, zero_reg, offset);
1938         }
1939         break;
1940       case less:
1941         if (r2.is(zero_reg)) {
1942           bltz(rs, offset);
1943         } else {
1944           slt(scratch, rs, r2);
1945           bne(scratch, zero_reg, offset);
1946         }
1947         break;
1948       case less_equal:
1949         if (r2.is(zero_reg)) {
1950           blez(rs, offset);
1951         } else {
1952           slt(scratch, r2, rs);
1953           beq(scratch, zero_reg, offset);
1954         }
1955         break;
1956       // Unsigned comparison.
1957       case Ugreater:
1958         if (r2.is(zero_reg)) {
1959           bgtz(rs, offset);
1960         } else {
1961           sltu(scratch, r2, rs);
1962           bne(scratch, zero_reg, offset);
1963         }
1964         break;
1965       case Ugreater_equal:
1966         if (r2.is(zero_reg)) {
1967           bgez(rs, offset);
1968         } else {
1969           sltu(scratch, rs, r2);
1970           beq(scratch, zero_reg, offset);
1971         }
1972         break;
1973       case Uless:
1974         if (r2.is(zero_reg)) {
1975           // No code needs to be emitted.
1976           return;
1977         } else {
1978           sltu(scratch, rs, r2);
1979           bne(scratch, zero_reg, offset);
1980         }
1981         break;
1982       case Uless_equal:
1983         if (r2.is(zero_reg)) {
1984           b(offset);
1985         } else {
1986           sltu(scratch, r2, rs);
1987           beq(scratch, zero_reg, offset);
1988         }
1989         break;
1990       default:
1991         UNREACHABLE();
1992     }
1993   } else {
1994     // Be careful to always use shifted_branch_offset only just before the
1995     // branch instruction, as the location will be remembered for patching the
1996     // target.
1997     BlockTrampolinePoolScope block_trampoline_pool(this);
1998     switch (cond) {
1999       case cc_always:
2000         b(offset);
2001         break;
2002       case eq:
2003         // We don't want any other register but scratch clobbered.
2004         DCHECK(!scratch.is(rs));
2005         r2 = scratch;
2006         li(r2, rt);
2007         beq(rs, r2, offset);
2008         break;
2009       case ne:
2010         // We don't want any other register but scratch clobbered.
2011         DCHECK(!scratch.is(rs));
2012         r2 = scratch;
2013         li(r2, rt);
2014         bne(rs, r2, offset);
2015         break;
2016       // Signed comparison.
2017       case greater:
2018         if (rt.imm64_ == 0) {
2019           bgtz(rs, offset);
2020         } else {
2021           r2 = scratch;
2022           li(r2, rt);
2023           slt(scratch, r2, rs);
2024           bne(scratch, zero_reg, offset);
2025         }
2026         break;
2027       case greater_equal:
2028         if (rt.imm64_ == 0) {
2029           bgez(rs, offset);
2030         } else if (is_int16(rt.imm64_)) {
2031           slti(scratch, rs, rt.imm64_);
2032           beq(scratch, zero_reg, offset);
2033         } else {
2034           r2 = scratch;
2035           li(r2, rt);
2036           slt(scratch, rs, r2);
2037           beq(scratch, zero_reg, offset);
2038         }
2039         break;
2040       case less:
2041         if (rt.imm64_ == 0) {
2042           bltz(rs, offset);
2043         } else if (is_int16(rt.imm64_)) {
2044           slti(scratch, rs, rt.imm64_);
2045           bne(scratch, zero_reg, offset);
2046         } else {
2047           r2 = scratch;
2048           li(r2, rt);
2049           slt(scratch, rs, r2);
2050           bne(scratch, zero_reg, offset);
2051         }
2052         break;
2053       case less_equal:
2054         if (rt.imm64_ == 0) {
2055           blez(rs, offset);
2056         } else {
2057           r2 = scratch;
2058           li(r2, rt);
2059           slt(scratch, r2, rs);
2060           beq(scratch, zero_reg, offset);
2061         }
2062         break;
2063       // Unsigned comparison.
2064       case Ugreater:
2065         if (rt.imm64_ == 0) {
2066           bgtz(rs, offset);
2067         } else {
2068           r2 = scratch;
2069           li(r2, rt);
2070           sltu(scratch, r2, rs);
2071           bne(scratch, zero_reg, offset);
2072         }
2073         break;
2074       case Ugreater_equal:
2075         if (rt.imm64_ == 0) {
2076           bgez(rs, offset);
2077         } else if (is_int16(rt.imm64_)) {
2078           sltiu(scratch, rs, rt.imm64_);
2079           beq(scratch, zero_reg, offset);
2080         } else {
2081           r2 = scratch;
2082           li(r2, rt);
2083           sltu(scratch, rs, r2);
2084           beq(scratch, zero_reg, offset);
2085         }
2086         break;
2087       case Uless:
2088         if (rt.imm64_ == 0) {
2089           // No code needs to be emitted.
2090           return;
2091         } else if (is_int16(rt.imm64_)) {
2092           sltiu(scratch, rs, rt.imm64_);
2093           bne(scratch, zero_reg, offset);
2094         } else {
2095           r2 = scratch;
2096           li(r2, rt);
2097           sltu(scratch, rs, r2);
2098           bne(scratch, zero_reg, offset);
2099         }
2100         break;
2101       case Uless_equal:
2102         if (rt.imm64_ == 0) {
2103           b(offset);
2104         } else {
2105           r2 = scratch;
2106           li(r2, rt);
2107           sltu(scratch, r2, rs);
2108           beq(scratch, zero_reg, offset);
2109         }
2110         break;
2111       default:
2112         UNREACHABLE();
2113     }
2114   }
2115   // Emit a nop in the branch delay slot if required.
2116   if (bdslot == PROTECT)
2117     nop();
2118 }
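// As an illustration, BranchShort(offset, greater, a0, Operand(a1)) with a1
// different from zero_reg expands to
//   slt(at, a1, a0);
//   bne(at, zero_reg, offset);
// followed by a delay-slot nop when the PROTECT policy is in effect.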
2119 
2120 
2121 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2122   // We use shifted_branch_offset as the argument to the branch instruction so
2123   // that it is evaluated just before the branch is emitted, as needed.
2124 
2125   b(shifted_branch_offset(L, false));
2126 
2127   // Emit a nop in the branch delay slot if required.
2128   if (bdslot == PROTECT)
2129     nop();
2130 }
2131 
2132 
2133 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2134                                  const Operand& rt,
2135                                  BranchDelaySlot bdslot) {
2136   BRANCH_ARGS_CHECK(cond, rs, rt);
2137 
2138   int32_t offset = 0;
2139   Register r2 = no_reg;
2140   Register scratch = at;
2141   if (rt.is_reg()) {
2142     BlockTrampolinePoolScope block_trampoline_pool(this);
2143     r2 = rt.rm_;
2144     // Be careful to always use shifted_branch_offset only just before the
2145     // branch instruction, as the location will be remembered for patching the
2146     // target.
2147     switch (cond) {
2148       case cc_always:
2149         offset = shifted_branch_offset(L, false);
2150         b(offset);
2151         break;
2152       case eq:
2153         offset = shifted_branch_offset(L, false);
2154         beq(rs, r2, offset);
2155         break;
2156       case ne:
2157         offset = shifted_branch_offset(L, false);
2158         bne(rs, r2, offset);
2159         break;
2160       // Signed comparison.
2161       case greater:
2162         if (r2.is(zero_reg)) {
2163           offset = shifted_branch_offset(L, false);
2164           bgtz(rs, offset);
2165         } else {
2166           slt(scratch, r2, rs);
2167           offset = shifted_branch_offset(L, false);
2168           bne(scratch, zero_reg, offset);
2169         }
2170         break;
2171       case greater_equal:
2172         if (r2.is(zero_reg)) {
2173           offset = shifted_branch_offset(L, false);
2174           bgez(rs, offset);
2175         } else {
2176           slt(scratch, rs, r2);
2177           offset = shifted_branch_offset(L, false);
2178           beq(scratch, zero_reg, offset);
2179         }
2180         break;
2181       case less:
2182         if (r2.is(zero_reg)) {
2183           offset = shifted_branch_offset(L, false);
2184           bltz(rs, offset);
2185         } else {
2186           slt(scratch, rs, r2);
2187           offset = shifted_branch_offset(L, false);
2188           bne(scratch, zero_reg, offset);
2189         }
2190         break;
2191       case less_equal:
2192         if (r2.is(zero_reg)) {
2193           offset = shifted_branch_offset(L, false);
2194           blez(rs, offset);
2195         } else {
2196           slt(scratch, r2, rs);
2197           offset = shifted_branch_offset(L, false);
2198           beq(scratch, zero_reg, offset);
2199         }
2200         break;
2201       // Unsigned comparison.
2202       case Ugreater:
2203         if (r2.is(zero_reg)) {
2204           offset = shifted_branch_offset(L, false);
2205           bgtz(rs, offset);
2206         } else {
2207           sltu(scratch, r2, rs);
2208           offset = shifted_branch_offset(L, false);
2209           bne(scratch, zero_reg, offset);
2210         }
2211         break;
2212       case Ugreater_equal:
2213         if (r2.is(zero_reg)) {
2214           offset = shifted_branch_offset(L, false);
2215           bgez(rs, offset);
2216         } else {
2217           sltu(scratch, rs, r2);
2218           offset = shifted_branch_offset(L, false);
2219           beq(scratch, zero_reg, offset);
2220         }
2221         break;
2222       case Uless:
2223         if (r2.is(zero_reg)) {
2224           // No code needs to be emitted.
2225           return;
2226         } else {
2227           sltu(scratch, rs, r2);
2228           offset = shifted_branch_offset(L, false);
2229           bne(scratch, zero_reg, offset);
2230         }
2231         break;
2232       case Uless_equal:
2233         if (r2.is(zero_reg)) {
2234           offset = shifted_branch_offset(L, false);
2235           b(offset);
2236         } else {
2237           sltu(scratch, r2, rs);
2238           offset = shifted_branch_offset(L, false);
2239           beq(scratch, zero_reg, offset);
2240         }
2241         break;
2242       default:
2243         UNREACHABLE();
2244     }
2245   } else {
2246     // Be careful to always use shifted_branch_offset only just before the
2247     // branch instruction, as the location will be remembered for patching the
2248     // target.
2249     BlockTrampolinePoolScope block_trampoline_pool(this);
2250     switch (cond) {
2251       case cc_always:
2252         offset = shifted_branch_offset(L, false);
2253         b(offset);
2254         break;
2255       case eq:
2256         DCHECK(!scratch.is(rs));
2257         r2 = scratch;
2258         li(r2, rt);
2259         offset = shifted_branch_offset(L, false);
2260         beq(rs, r2, offset);
2261         break;
2262       case ne:
2263         DCHECK(!scratch.is(rs));
2264         r2 = scratch;
2265         li(r2, rt);
2266         offset = shifted_branch_offset(L, false);
2267         bne(rs, r2, offset);
2268         break;
2269       // Signed comparison.
2270       case greater:
2271         if (rt.imm64_ == 0) {
2272           offset = shifted_branch_offset(L, false);
2273           bgtz(rs, offset);
2274         } else {
2275           DCHECK(!scratch.is(rs));
2276           r2 = scratch;
2277           li(r2, rt);
2278           slt(scratch, r2, rs);
2279           offset = shifted_branch_offset(L, false);
2280           bne(scratch, zero_reg, offset);
2281         }
2282         break;
2283       case greater_equal:
2284         if (rt.imm64_ == 0) {
2285           offset = shifted_branch_offset(L, false);
2286           bgez(rs, offset);
2287         } else if (is_int16(rt.imm64_)) {
2288           slti(scratch, rs, rt.imm64_);
2289           offset = shifted_branch_offset(L, false);
2290           beq(scratch, zero_reg, offset);
2291         } else {
2292           DCHECK(!scratch.is(rs));
2293           r2 = scratch;
2294           li(r2, rt);
2295           slt(scratch, rs, r2);
2296           offset = shifted_branch_offset(L, false);
2297           beq(scratch, zero_reg, offset);
2298         }
2299         break;
2300       case less:
2301         if (rt.imm64_ == 0) {
2302           offset = shifted_branch_offset(L, false);
2303           bltz(rs, offset);
2304         } else if (is_int16(rt.imm64_)) {
2305           slti(scratch, rs, rt.imm64_);
2306           offset = shifted_branch_offset(L, false);
2307           bne(scratch, zero_reg, offset);
2308         } else {
2309           DCHECK(!scratch.is(rs));
2310           r2 = scratch;
2311           li(r2, rt);
2312           slt(scratch, rs, r2);
2313           offset = shifted_branch_offset(L, false);
2314           bne(scratch, zero_reg, offset);
2315         }
2316         break;
2317       case less_equal:
2318         if (rt.imm64_ == 0) {
2319           offset = shifted_branch_offset(L, false);
2320           blez(rs, offset);
2321         } else {
2322           DCHECK(!scratch.is(rs));
2323           r2 = scratch;
2324           li(r2, rt);
2325           slt(scratch, r2, rs);
2326           offset = shifted_branch_offset(L, false);
2327           beq(scratch, zero_reg, offset);
2328         }
2329         break;
2330       // Unsigned comparison.
2331       case Ugreater:
2332         if (rt.imm64_ == 0) {
2333           offset = shifted_branch_offset(L, false);
2334           bne(rs, zero_reg, offset);
2335         } else {
2336           DCHECK(!scratch.is(rs));
2337           r2 = scratch;
2338           li(r2, rt);
2339           sltu(scratch, r2, rs);
2340           offset = shifted_branch_offset(L, false);
2341           bne(scratch, zero_reg, offset);
2342         }
2343         break;
2344       case Ugreater_equal:
2345         if (rt.imm64_ == 0) {
2346           offset = shifted_branch_offset(L, false);
2347           bgez(rs, offset);
2348         } else if (is_int16(rt.imm64_)) {
2349           sltiu(scratch, rs, rt.imm64_);
2350           offset = shifted_branch_offset(L, false);
2351           beq(scratch, zero_reg, offset);
2352         } else {
2353           DCHECK(!scratch.is(rs));
2354           r2 = scratch;
2355           li(r2, rt);
2356           sltu(scratch, rs, r2);
2357           offset = shifted_branch_offset(L, false);
2358           beq(scratch, zero_reg, offset);
2359         }
2360         break;
2361       case Uless:
2362         if (rt.imm64_ == 0) {
2363           // No code needs to be emitted.
2364           return;
2365         } else if (is_int16(rt.imm64_)) {
2366           sltiu(scratch, rs, rt.imm64_);
2367           offset = shifted_branch_offset(L, false);
2368           bne(scratch, zero_reg, offset);
2369         } else {
2370           DCHECK(!scratch.is(rs));
2371           r2 = scratch;
2372           li(r2, rt);
2373           sltu(scratch, rs, r2);
2374           offset = shifted_branch_offset(L, false);
2375           bne(scratch, zero_reg, offset);
2376         }
2377         break;
2378       case Uless_equal:
2379         if (rt.imm64_ == 0) {
2380           offset = shifted_branch_offset(L, false);
2381           beq(rs, zero_reg, offset);
2382         } else {
2383           DCHECK(!scratch.is(rs));
2384           r2 = scratch;
2385           li(r2, rt);
2386           sltu(scratch, r2, rs);
2387           offset = shifted_branch_offset(L, false);
2388           beq(scratch, zero_reg, offset);
2389         }
2390         break;
2391       default:
2392         UNREACHABLE();
2393     }
2394   }
2395   // Check that the offset actually fits in an int16_t.
2396   DCHECK(is_int16(offset));
2397   // Emit a nop in the branch delay slot if required.
2398   if (bdslot == PROTECT)
2399     nop();
2400 }
2401 
2402 
2403 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2404   BranchAndLinkShort(offset, bdslot);
2405 }
2406 
2407 
2408 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2409                                    const Operand& rt,
2410                                    BranchDelaySlot bdslot) {
2411   BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2412 }
2413 
2414 
2415 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2416   if (L->is_bound()) {
2417     if (is_near(L)) {
2418       BranchAndLinkShort(L, bdslot);
2419     } else {
2420       Jalr(L, bdslot);
2421     }
2422   } else {
2423     if (is_trampoline_emitted()) {
2424       Jalr(L, bdslot);
2425     } else {
2426       BranchAndLinkShort(L, bdslot);
2427     }
2428   }
2429 }
2430 
2431 
2432 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2433                                    const Operand& rt,
2434                                    BranchDelaySlot bdslot) {
2435   if (L->is_bound()) {
2436     if (is_near(L)) {
2437       BranchAndLinkShort(L, cond, rs, rt, bdslot);
2438     } else {
2439       Label skip;
2440       Condition neg_cond = NegateCondition(cond);
2441       BranchShort(&skip, neg_cond, rs, rt);
2442       Jalr(L, bdslot);
2443       bind(&skip);
2444     }
2445   } else {
2446     if (is_trampoline_emitted()) {
2447       Label skip;
2448       Condition neg_cond = NegateCondition(cond);
2449       BranchShort(&skip, neg_cond, rs, rt);
2450       Jalr(L, bdslot);
2451       bind(&skip);
2452     } else {
2453       BranchAndLinkShort(L, cond, rs, rt, bdslot);
2454     }
2455   }
2456 }
2457 
2458 
2459 // We need to use a bgezal or bltzal, but they can't be used directly with the
2460 // slt instructions. We could use sub or add instead but we would miss overflow
2461 // cases, so we keep slt and add an intermediate third instruction.
2462 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2463                                         BranchDelaySlot bdslot) {
2464   bal(offset);
2465 
2466   // Emit a nop in the branch delay slot if required.
2467   if (bdslot == PROTECT)
2468     nop();
2469 }
2470 
2471 
2472 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2473                                         Register rs, const Operand& rt,
2474                                         BranchDelaySlot bdslot) {
2475   BRANCH_ARGS_CHECK(cond, rs, rt);
2476   Register r2 = no_reg;
2477   Register scratch = at;
2478 
2479   if (rt.is_reg()) {
2480     r2 = rt.rm_;
2481   } else if (cond != cc_always) {
2482     r2 = scratch;
2483     li(r2, rt);
2484   }
2485 
2486   {
2487     BlockTrampolinePoolScope block_trampoline_pool(this);
2488     switch (cond) {
2489       case cc_always:
2490         bal(offset);
2491         break;
2492       case eq:
2493         bne(rs, r2, 2);
2494         nop();
2495         bal(offset);
2496         break;
2497       case ne:
2498         beq(rs, r2, 2);
2499         nop();
2500         bal(offset);
2501         break;
2502 
2503       // Signed comparison.
2504       case greater:
2505         // rs > rt
2506         slt(scratch, r2, rs);
2507         beq(scratch, zero_reg, 2);
2508         nop();
2509         bal(offset);
2510         break;
2511       case greater_equal:
2512         // rs >= rt
2513         slt(scratch, rs, r2);
2514         bne(scratch, zero_reg, 2);
2515         nop();
2516         bal(offset);
2517         break;
2518       case less:
2519         // rs < r2
2520         slt(scratch, rs, r2);
2521         bne(scratch, zero_reg, 2);
2522         nop();
2523         bal(offset);
2524         break;
2525       case less_equal:
2526         // rs <= r2
2527         slt(scratch, r2, rs);
2528         bne(scratch, zero_reg, 2);
2529         nop();
2530         bal(offset);
2531         break;
2532 
2533 
2534       // Unsigned comparison.
2535       case Ugreater:
2536         // rs > rt
2537         sltu(scratch, r2, rs);
2538         beq(scratch, zero_reg, 2);
2539         nop();
2540         bal(offset);
2541         break;
2542       case Ugreater_equal:
2543         // rs >= rt
2544         sltu(scratch, rs, r2);
2545         bne(scratch, zero_reg, 2);
2546         nop();
2547         bal(offset);
2548         break;
2549       case Uless:
2550         // rs < r2
2551         sltu(scratch, rs, r2);
2552         bne(scratch, zero_reg, 2);
2553         nop();
2554         bal(offset);
2555         break;
2556       case Uless_equal:
2557         // rs <= r2
2558         sltu(scratch, r2, rs);
2559         bne(scratch, zero_reg, 2);
2560         nop();
2561         bal(offset);
2562         break;
2563       default:
2564         UNREACHABLE();
2565     }
2566   }
2567   // Emit a nop in the branch delay slot if required.
2568   if (bdslot == PROTECT)
2569     nop();
2570 }
2571 
2572 
2573 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2574   bal(shifted_branch_offset(L, false));
2575 
2576   // Emit a nop in the branch delay slot if required.
2577   if (bdslot == PROTECT)
2578     nop();
2579 }
2580 
2581 
2582 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2583                                         const Operand& rt,
2584                                         BranchDelaySlot bdslot) {
2585   BRANCH_ARGS_CHECK(cond, rs, rt);
2586 
2587   int32_t offset = 0;
2588   Register r2 = no_reg;
2589   Register scratch = at;
2590   if (rt.is_reg()) {
2591     r2 = rt.rm_;
2592   } else if (cond != cc_always) {
2593     r2 = scratch;
2594     li(r2, rt);
2595   }
2596 
2597   {
2598     BlockTrampolinePoolScope block_trampoline_pool(this);
2599     switch (cond) {
2600       case cc_always:
2601         offset = shifted_branch_offset(L, false);
2602         bal(offset);
2603         break;
2604       case eq:
2605         bne(rs, r2, 2);
2606         nop();
2607         offset = shifted_branch_offset(L, false);
2608         bal(offset);
2609         break;
2610       case ne:
2611         beq(rs, r2, 2);
2612         nop();
2613         offset = shifted_branch_offset(L, false);
2614         bal(offset);
2615         break;
2616 
2617       // Signed comparison.
2618       case greater:
2619         // rs > rt
2620         slt(scratch, r2, rs);
2621         beq(scratch, zero_reg, 2);
2622         nop();
2623         offset = shifted_branch_offset(L, false);
2624         bal(offset);
2625         break;
2626       case greater_equal:
2627         // rs >= rt
2628         slt(scratch, rs, r2);
2629         bne(scratch, zero_reg, 2);
2630         nop();
2631         offset = shifted_branch_offset(L, false);
2632         bal(offset);
2633         break;
2634       case less:
2635         // rs < r2
2636         slt(scratch, rs, r2);
2637         bne(scratch, zero_reg, 2);
2638         nop();
2639         offset = shifted_branch_offset(L, false);
2640         bal(offset);
2641         break;
2642       case less_equal:
2643         // rs <= r2
2644         slt(scratch, r2, rs);
2645         bne(scratch, zero_reg, 2);
2646         nop();
2647         offset = shifted_branch_offset(L, false);
2648         bal(offset);
2649         break;
2650 
2651 
2652       // Unsigned comparison.
2653       case Ugreater:
2654         // rs > rt
2655         sltu(scratch, r2, rs);
2656         beq(scratch, zero_reg, 2);
2657         nop();
2658         offset = shifted_branch_offset(L, false);
2659         bal(offset);
2660         break;
2661       case Ugreater_equal:
2662         // rs >= rt
2663         sltu(scratch, rs, r2);
2664         bne(scratch, zero_reg, 2);
2665         nop();
2666         offset = shifted_branch_offset(L, false);
2667         bal(offset);
2668         break;
2669       case Uless:
2670         // rs < r2
2671         sltu(scratch, rs, r2);
2672         bne(scratch, zero_reg, 2);
2673         nop();
2674         offset = shifted_branch_offset(L, false);
2675         bal(offset);
2676         break;
2677       case Uless_equal:
2678         // rs <= r2
2679         sltu(scratch, r2, rs);
2680         bne(scratch, zero_reg, 2);
2681         nop();
2682         offset = shifted_branch_offset(L, false);
2683         bal(offset);
2684         break;
2685 
2686       default:
2687         UNREACHABLE();
2688     }
2689   }
2690   // Check that the offset actually fits in an int16_t.
2691   DCHECK(is_int16(offset));
2692 
2693   // Emit a nop in the branch delay slot if required.
2694   if (bdslot == PROTECT)
2695     nop();
2696 }
2697 
2698 
2699 void MacroAssembler::Jump(Register target,
2700                           Condition cond,
2701                           Register rs,
2702                           const Operand& rt,
2703                           BranchDelaySlot bd) {
2704   BlockTrampolinePoolScope block_trampoline_pool(this);
2705   if (cond == cc_always) {
2706     jr(target);
2707   } else {
2708     BRANCH_ARGS_CHECK(cond, rs, rt);
2709     Branch(2, NegateCondition(cond), rs, rt);
2710     jr(target);
2711   }
2712   // Emit a nop in the branch delay slot if required.
2713   if (bd == PROTECT)
2714     nop();
2715 }
2716 
2717 
2718 void MacroAssembler::Jump(intptr_t target,
2719                           RelocInfo::Mode rmode,
2720                           Condition cond,
2721                           Register rs,
2722                           const Operand& rt,
2723                           BranchDelaySlot bd) {
2724   Label skip;
2725   if (cond != cc_always) {
2726     Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2727   }
2728   // The first instruction of 'li' may be placed in the delay slot.
2729   // This is not an issue, t9 is expected to be clobbered anyway.
2730   li(t9, Operand(target, rmode));
2731   Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2732   bind(&skip);
2733 }
2734 
2735 
2736 void MacroAssembler::Jump(Address target,
2737                           RelocInfo::Mode rmode,
2738                           Condition cond,
2739                           Register rs,
2740                           const Operand& rt,
2741                           BranchDelaySlot bd) {
2742   DCHECK(!RelocInfo::IsCodeTarget(rmode));
2743   Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2744 }
2745 
2746 
2747 void MacroAssembler::Jump(Handle<Code> code,
2748                           RelocInfo::Mode rmode,
2749                           Condition cond,
2750                           Register rs,
2751                           const Operand& rt,
2752                           BranchDelaySlot bd) {
2753   DCHECK(RelocInfo::IsCodeTarget(rmode));
2754   AllowDeferredHandleDereference embedding_raw_address;
2755   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2756 }
2757 
2758 
2759 int MacroAssembler::CallSize(Register target,
2760                              Condition cond,
2761                              Register rs,
2762                              const Operand& rt,
2763                              BranchDelaySlot bd) {
2764   int size = 0;
2765 
2766   if (cond == cc_always) {
2767     size += 1;
2768   } else {
2769     size += 3;
2770   }
2771 
2772   if (bd == PROTECT)
2773     size += 1;
2774 
2775   return size * kInstrSize;
2776 }
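// Worked example: an unconditional call is a single jalr, so with a PROTECT
// delay slot CallSize reports (1 + 1) * kInstrSize = 8 bytes. A conditional
// call also reserves the short branch that skips the jalr together with its
// delay slot, i.e. 3 instructions before the optional trailing nop.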
2777 
2778 
2779 // Note: To call gcc-compiled C code on MIPS, you must call through t9.
2780 void MacroAssembler::Call(Register target,
2781                           Condition cond,
2782                           Register rs,
2783                           const Operand& rt,
2784                           BranchDelaySlot bd) {
2785   BlockTrampolinePoolScope block_trampoline_pool(this);
2786   Label start;
2787   bind(&start);
2788   if (cond == cc_always) {
2789     jalr(target);
2790   } else {
2791     BRANCH_ARGS_CHECK(cond, rs, rt);
2792     Branch(2, NegateCondition(cond), rs, rt);
2793     jalr(target);
2794   }
2795   // Emit a nop in the branch delay slot if required.
2796   if (bd == PROTECT)
2797     nop();
2798 
2799   DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
2800             SizeOfCodeGeneratedSince(&start));
2801 }
2802 
2803 
2804 int MacroAssembler::CallSize(Address target,
2805                              RelocInfo::Mode rmode,
2806                              Condition cond,
2807                              Register rs,
2808                              const Operand& rt,
2809                              BranchDelaySlot bd) {
2810   int size = CallSize(t9, cond, rs, rt, bd);
2811   return size + 4 * kInstrSize;
2812 }
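// The additional 4 * kInstrSize accounts for the fixed-length address load:
// li(t9, ..., ADDRESS_LOAD) in Call(Address) below always emits a
// four-instruction sequence, which keeps every such call site the same size
// and therefore patchable.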
2813 
2814 
2815 void MacroAssembler::Call(Address target,
2816                           RelocInfo::Mode rmode,
2817                           Condition cond,
2818                           Register rs,
2819                           const Operand& rt,
2820                           BranchDelaySlot bd) {
2821   BlockTrampolinePoolScope block_trampoline_pool(this);
2822   Label start;
2823   bind(&start);
2824   int64_t target_int = reinterpret_cast<int64_t>(target);
2825   // Must record previous source positions before the
2826   // li() generates a new code target.
2827   positions_recorder()->WriteRecordedPositions();
2828   li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
2829   Call(t9, cond, rs, rt, bd);
2830   DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2831             SizeOfCodeGeneratedSince(&start));
2832 }
2833 
2834 
2835 int MacroAssembler::CallSize(Handle<Code> code,
2836                              RelocInfo::Mode rmode,
2837                              TypeFeedbackId ast_id,
2838                              Condition cond,
2839                              Register rs,
2840                              const Operand& rt,
2841                              BranchDelaySlot bd) {
2842   AllowDeferredHandleDereference using_raw_address;
2843   return CallSize(reinterpret_cast<Address>(code.location()),
2844       rmode, cond, rs, rt, bd);
2845 }
2846 
2847 
2848 void MacroAssembler::Call(Handle<Code> code,
2849                           RelocInfo::Mode rmode,
2850                           TypeFeedbackId ast_id,
2851                           Condition cond,
2852                           Register rs,
2853                           const Operand& rt,
2854                           BranchDelaySlot bd) {
2855   BlockTrampolinePoolScope block_trampoline_pool(this);
2856   Label start;
2857   bind(&start);
2858   DCHECK(RelocInfo::IsCodeTarget(rmode));
2859   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2860     SetRecordedAstId(ast_id);
2861     rmode = RelocInfo::CODE_TARGET_WITH_ID;
2862   }
2863   AllowDeferredHandleDereference embedding_raw_address;
2864   Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2865   DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2866             SizeOfCodeGeneratedSince(&start));
2867 }
2868 
2869 
2870 void MacroAssembler::Ret(Condition cond,
2871                          Register rs,
2872                          const Operand& rt,
2873                          BranchDelaySlot bd) {
2874   Jump(ra, cond, rs, rt, bd);
2875 }
2876 
2877 
2878 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2879   BlockTrampolinePoolScope block_trampoline_pool(this);
2880 
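  // The j instruction encodes only the low 28 bits of the target address; the
  // upper bits are taken from the address of the delay slot, so the jump must
  // stay within the current 256 MB aligned region.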
2881   uint64_t imm28;
2882   imm28 = jump_address(L);
2883   imm28 &= kImm28Mask;
2884   { BlockGrowBufferScope block_buf_growth(this);
2885     // Buffer growth (and relocation) must be blocked for internal references
2886     // until associated instructions are emitted and available to be patched.
2887     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2888     j(imm28);
2889   }
2890   // Emit a nop in the branch delay slot if required.
2891   if (bdslot == PROTECT)
2892     nop();
2893 }
2894 
2895 
2896 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2897   BlockTrampolinePoolScope block_trampoline_pool(this);
2898 
2899   uint64_t imm64;
2900   imm64 = jump_address(L);
2901   { BlockGrowBufferScope block_buf_growth(this);
2902     // Buffer growth (and relocation) must be blocked for internal references
2903     // until associated instructions are emitted and available to be patched.
2904     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2905     li(at, Operand(imm64), ADDRESS_LOAD);
2906   }
2907   jr(at);
2908 
2909   // Emit a nop in the branch delay slot if required.
2910   if (bdslot == PROTECT)
2911     nop();
2912 }
2913 
2914 
2915 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2916   BlockTrampolinePoolScope block_trampoline_pool(this);
2917 
2918   uint64_t imm64;
2919   imm64 = jump_address(L);
2920   { BlockGrowBufferScope block_buf_growth(this);
2921     // Buffer growth (and relocation) must be blocked for internal references
2922     // until associated instructions are emitted and available to be patched.
2923     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2924     li(at, Operand(imm64), ADDRESS_LOAD);
2925   }
2926   jalr(at);
2927 
2928   // Emit a nop in the branch delay slot if required.
2929   if (bdslot == PROTECT)
2930     nop();
2931 }
2932 
2933 
2934 void MacroAssembler::DropAndRet(int drop) {
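  // The stack adjustment below lands in the delay slot of the "jr ra" emitted
  // by Ret(USE_DELAY_SLOT), so dropping the arguments costs no extra
  // instruction.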
2935   Ret(USE_DELAY_SLOT);
2936   daddiu(sp, sp, drop * kPointerSize);
2937 }
2938 
2939 void MacroAssembler::DropAndRet(int drop,
2940                                 Condition cond,
2941                                 Register r1,
2942                                 const Operand& r2) {
2943   // Both Drop and Ret need to be conditional.
2944   Label skip;
2945   if (cond != cc_always) {
2946     Branch(&skip, NegateCondition(cond), r1, r2);
2947   }
2948 
2949   Drop(drop);
2950   Ret();
2951 
2952   if (cond != cc_always) {
2953     bind(&skip);
2954   }
2955 }
2956 
2957 
2958 void MacroAssembler::Drop(int count,
2959                           Condition cond,
2960                           Register reg,
2961                           const Operand& op) {
2962   if (count <= 0) {
2963     return;
2964   }
2965 
2966   Label skip;
2967 
2968   if (cond != al) {
2969      Branch(&skip, NegateCondition(cond), reg, op);
2970   }
2971 
2972   daddiu(sp, sp, count * kPointerSize);
2973 
2974   if (cond != al) {
2975     bind(&skip);
2976   }
2977 }
2978 
2979 
2980 
2981 void MacroAssembler::Swap(Register reg1,
2982                           Register reg2,
2983                           Register scratch) {
2984   if (scratch.is(no_reg)) {
2985     Xor(reg1, reg1, Operand(reg2));
2986     Xor(reg2, reg2, Operand(reg1));
2987     Xor(reg1, reg1, Operand(reg2));
2988   } else {
2989     mov(scratch, reg1);
2990     mov(reg1, reg2);
2991     mov(reg2, scratch);
2992   }
2993 }
2994 
2995 
2996 void MacroAssembler::Call(Label* target) {
2997   BranchAndLink(target);
2998 }
2999 
3000 
3001 void MacroAssembler::Push(Handle<Object> handle) {
3002   li(at, Operand(handle));
3003   push(at);
3004 }
3005 
3006 
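// PushRegisterAsTwoSmis/PopRegisterAsTwoSmis spill a raw 64-bit value as two
// stack slots, each holding one 32-bit half shifted into smi position (the
// upper 32 bits of the word), so the stack never holds the raw value in a
// form that could be mistaken for a pointer.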
3007 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
3008   DCHECK(!src.is(scratch));
3009   mov(scratch, src);
3010   dsrl32(src, src, 0);
3011   dsll32(src, src, 0);
3012   push(src);
3013   dsll32(scratch, scratch, 0);
3014   push(scratch);
3015 }
3016 
3017 
3018 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
3019   DCHECK(!dst.is(scratch));
3020   pop(scratch);
3021   dsrl32(scratch, scratch, 0);
3022   pop(dst);
3023   dsrl32(dst, dst, 0);
3024   dsll32(dst, dst, 0);
3025   or_(dst, dst, scratch);
3026 }
3027 
3028 
3029 void MacroAssembler::DebugBreak() {
3030   PrepareCEntryArgs(0);
3031   PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
3032   CEntryStub ces(isolate(), 1);
3033   DCHECK(AllowThisStubCall(&ces));
3034   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3035 }
3036 
3037 
3038 // ---------------------------------------------------------------------------
3039 // Exception handling.
3040 
3041 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3042                                     int handler_index) {
3043   // Adjust this code if not the case.
3044   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3045   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3046   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3047   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3048   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3049   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3050 
3051   // For the JSEntry handler, we must preserve a0-a3 and s0.
3052   // a5-a7 are available. We will build up the handler from the bottom by
3053   // pushing on the stack.
3054   // Set up the code object (a5) and the state (a6) for pushing.
3055   unsigned state =
3056       StackHandler::IndexField::encode(handler_index) |
3057       StackHandler::KindField::encode(kind);
3058   li(a5, Operand(CodeObject()), CONSTANT_SIZE);
3059   li(a6, Operand(state));
3060 
3061   // Push the frame pointer, context, state, and code object.
3062   if (kind == StackHandler::JS_ENTRY) {
3063     DCHECK_EQ(Smi::FromInt(0), 0);
3064     // The second zero_reg indicates no context.
3065     // The first zero_reg is the NULL frame pointer.
3066     // The operands are reversed to match the order of MultiPush/Pop.
3067     Push(zero_reg, zero_reg, a6, a5);
3068   } else {
3069     MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit());
3070   }
3071 
3072   // Link the current handler as the next handler.
3073   li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3074   ld(a5, MemOperand(a6));
3075   push(a5);
3076   // Set this new handler as the current one.
3077   sd(sp, MemOperand(a6));
3078 }
3079 
3080 
3081 void MacroAssembler::PopTryHandler() {
3082   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3083   pop(a1);
3084   Daddu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3085   li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3086   sd(a1, MemOperand(at));
3087 }
3088 
3089 
3090 void MacroAssembler::JumpToHandlerEntry() {
3091   // Compute the handler entry address and jump to it.  The handler table is
3092   // a fixed array of (smi-tagged) code offsets.
3093   // v0 = exception, a1 = code object, a2 = state.
3094   Uld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));
3095   Daddu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3096   dsrl(a2, a2, StackHandler::kKindWidth);  // Handler index.
3097   dsll(a2, a2, kPointerSizeLog2);
3098   Daddu(a2, a3, a2);
3099   ld(a2, MemOperand(a2));  // Smi-tagged offset.
3100   Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
3101   dsra32(t9, a2, 0);
3102   Daddu(t9, t9, a1);
3103   Jump(t9);  // Jump.
3104 }
3105 
3106 
3107 void MacroAssembler::Throw(Register value) {
3108   // Adjust this code if not the case.
3109   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3110   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3111   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3112   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3113   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3114   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3115 
3116   // The exception is expected in v0.
3117   Move(v0, value);
3118 
3119   // Drop the stack pointer to the top of the top handler.
3120   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
3121                                    isolate())));
3122   ld(sp, MemOperand(a3));
3123 
3124   // Restore the next handler.
3125   pop(a2);
3126   sd(a2, MemOperand(a3));
3127 
3128   // Get the code object (a1) and state (a2).  Restore the context and frame
3129   // pointer.
3130   MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3131 
3132   // If the handler is a JS frame, restore the context to the frame.
3133   // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
3134   // or cp.
3135   Label done;
3136   Branch(&done, eq, cp, Operand(zero_reg));
3137   sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3138   bind(&done);
3139 
3140   JumpToHandlerEntry();
3141 }
3142 
3143 
3144 void MacroAssembler::ThrowUncatchable(Register value) {
3145   // Adjust this code if not the case.
3146   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3147   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3148   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3149   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3150   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3151   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3152 
3153   // The exception is expected in v0.
3154   if (!value.is(v0)) {
3155     mov(v0, value);
3156   }
3157   // Drop the stack pointer to the top of the top stack handler.
3158   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3159   ld(sp, MemOperand(a3));
3160 
3161   // Unwind the handlers until the ENTRY handler is found.
3162   Label fetch_next, check_kind;
3163   jmp(&check_kind);
3164   bind(&fetch_next);
3165   ld(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
3166 
3167   bind(&check_kind);
3168   STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3169   ld(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
3170   And(a2, a2, Operand(StackHandler::KindField::kMask));
3171   Branch(&fetch_next, ne, a2, Operand(zero_reg));
3172 
3173   // Set the top handler address to next handler past the top ENTRY handler.
3174   pop(a2);
3175   sd(a2, MemOperand(a3));
3176 
3177   // Get the code object (a1) and state (a2).  Clear the context and frame
3178   // pointer (0 was saved in the handler).
3179   MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3180 
3181   JumpToHandlerEntry();
3182 }
3183 
3184 
3185 void MacroAssembler::Allocate(int object_size,
3186                               Register result,
3187                               Register scratch1,
3188                               Register scratch2,
3189                               Label* gc_required,
3190                               AllocationFlags flags) {
3191   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3192   if (!FLAG_inline_new) {
3193     if (emit_debug_code()) {
3194       // Trash the registers to simulate an allocation failure.
3195       li(result, 0x7091);
3196       li(scratch1, 0x7191);
3197       li(scratch2, 0x7291);
3198     }
3199     jmp(gc_required);
3200     return;
3201   }
3202 
3203   DCHECK(!result.is(scratch1));
3204   DCHECK(!result.is(scratch2));
3205   DCHECK(!scratch1.is(scratch2));
3206   DCHECK(!scratch1.is(t9));
3207   DCHECK(!scratch2.is(t9));
3208   DCHECK(!result.is(t9));
3209 
3210   // Make object size into bytes.
3211   if ((flags & SIZE_IN_WORDS) != 0) {
3212     object_size *= kPointerSize;
3213   }
3214   DCHECK(0 == (object_size & kObjectAlignmentMask));
3215 
3216   // Check relative positions of allocation top and limit addresses.
3217   // ARM adds additional checks to make sure the ldm instruction can be
3218   // used. On MIPS we don't have ldm so we don't need additional checks either.
3219   ExternalReference allocation_top =
3220       AllocationUtils::GetAllocationTopReference(isolate(), flags);
3221   ExternalReference allocation_limit =
3222       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3223 
3224   intptr_t top   =
3225       reinterpret_cast<intptr_t>(allocation_top.address());
3226   intptr_t limit =
3227       reinterpret_cast<intptr_t>(allocation_limit.address());
3228   DCHECK((limit - top) == kPointerSize);
3229 
3230   // Set up allocation top address and object size registers.
3231   Register topaddr = scratch1;
3232   li(topaddr, Operand(allocation_top));
3233 
3234   // This code stores a temporary value in t9.
3235   if ((flags & RESULT_CONTAINS_TOP) == 0) {
3236     // Load allocation top into result and allocation limit into t9.
3237     ld(result, MemOperand(topaddr));
3238     ld(t9, MemOperand(topaddr, kPointerSize));
3239   } else {
3240     if (emit_debug_code()) {
3241       // Assert that result actually contains top on entry. t9 is used
3242       // immediately below, so this use of t9 does not make register contents
3243       // differ between debug and release modes.
3244       ld(t9, MemOperand(topaddr));
3245       Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3246     }
3247     // Load allocation limit into t9. Result already contains allocation top.
3248     ld(t9, MemOperand(topaddr, limit - top));
3249   }
3250 
3251   DCHECK(kPointerSize == kDoubleSize);
3252   if (emit_debug_code()) {
3253     And(at, result, Operand(kDoubleAlignmentMask));
3254     Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3255   }
3256 
3257   // Calculate new top and bail out if new space is exhausted. Use result
3258   // to calculate the new top.
3259   Daddu(scratch2, result, Operand(object_size));
3260   Branch(gc_required, Ugreater, scratch2, Operand(t9));
3261   sd(scratch2, MemOperand(topaddr));
3262 
3263   // Tag object if requested.
3264   if ((flags & TAG_OBJECT) != 0) {
3265     Daddu(result, result, Operand(kHeapObjectTag));
3266   }
3267 }
3268 
3269 
3270 void MacroAssembler::Allocate(Register object_size,
3271                               Register result,
3272                               Register scratch1,
3273                               Register scratch2,
3274                               Label* gc_required,
3275                               AllocationFlags flags) {
3276   if (!FLAG_inline_new) {
3277     if (emit_debug_code()) {
3278       // Trash the registers to simulate an allocation failure.
3279       li(result, 0x7091);
3280       li(scratch1, 0x7191);
3281       li(scratch2, 0x7291);
3282     }
3283     jmp(gc_required);
3284     return;
3285   }
3286 
3287   DCHECK(!result.is(scratch1));
3288   DCHECK(!result.is(scratch2));
3289   DCHECK(!scratch1.is(scratch2));
3290   DCHECK(!object_size.is(t9));
3291   DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3292 
3293   // Check relative positions of allocation top and limit addresses.
3294   // ARM adds additional checks to make sure the ldm instruction can be
3295   // used. On MIPS we don't have ldm so we don't need additional checks either.
3296   ExternalReference allocation_top =
3297       AllocationUtils::GetAllocationTopReference(isolate(), flags);
3298   ExternalReference allocation_limit =
3299       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3300   intptr_t top   =
3301       reinterpret_cast<intptr_t>(allocation_top.address());
3302   intptr_t limit =
3303       reinterpret_cast<intptr_t>(allocation_limit.address());
3304   DCHECK((limit - top) == kPointerSize);
3305 
3306   // Set up allocation top address and object size registers.
3307   Register topaddr = scratch1;
3308   li(topaddr, Operand(allocation_top));
3309 
3310   // This code stores a temporary value in t9.
3311   if ((flags & RESULT_CONTAINS_TOP) == 0) {
3312     // Load allocation top into result and allocation limit into t9.
3313     ld(result, MemOperand(topaddr));
3314     ld(t9, MemOperand(topaddr, kPointerSize));
3315   } else {
3316     if (emit_debug_code()) {
3317       // Assert that result actually contains top on entry. t9 is used
3318       // immediately below, so this use of t9 does not make register contents
3319       // differ between debug and release modes.
3320       ld(t9, MemOperand(topaddr));
3321       Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3322     }
3323     // Load allocation limit into t9. Result already contains allocation top.
3324     ld(t9, MemOperand(topaddr, limit - top));
3325   }
3326 
3327   DCHECK(kPointerSize == kDoubleSize);
3328   if (emit_debug_code()) {
3329     And(at, result, Operand(kDoubleAlignmentMask));
3330     Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3331   }
3332 
3333   // Calculate new top and bail out if new space is exhausted. Use result
3334   // to calculate the new top. Object size may be in words so a shift is
3335   // required to get the number of bytes.
3336   if ((flags & SIZE_IN_WORDS) != 0) {
3337     dsll(scratch2, object_size, kPointerSizeLog2);
3338     Daddu(scratch2, result, scratch2);
3339   } else {
3340     Daddu(scratch2, result, Operand(object_size));
3341   }
3342   Branch(gc_required, Ugreater, scratch2, Operand(t9));
3343 
3344   // Update allocation top. result temporarily holds the new top.
3345   if (emit_debug_code()) {
3346     And(t9, scratch2, Operand(kObjectAlignmentMask));
3347     Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3348   }
3349   sd(scratch2, MemOperand(topaddr));
3350 
3351   // Tag object if requested.
3352   if ((flags & TAG_OBJECT) != 0) {
3353     Daddu(result, result, Operand(kHeapObjectTag));
3354   }
3355 }
3356 
3357 
3358 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3359                                               Register scratch) {
3360   ExternalReference new_space_allocation_top =
3361       ExternalReference::new_space_allocation_top_address(isolate());
3362 
3363   // Make sure the object has no tag before resetting top.
3364   And(object, object, Operand(~kHeapObjectTagMask));
3365 #ifdef DEBUG
3366   // Check that the object being un-allocated is below the current top.
3367   li(scratch, Operand(new_space_allocation_top));
3368   ld(scratch, MemOperand(scratch));
3369   Check(less, kUndoAllocationOfNonAllocatedMemory,
3370       object, Operand(scratch));
3371 #endif
3372   // Write the address of the object to un-allocate as the current top.
3373   li(scratch, Operand(new_space_allocation_top));
3374   sd(object, MemOperand(scratch));
3375 }
3376 
3377 
3378 void MacroAssembler::AllocateTwoByteString(Register result,
3379                                            Register length,
3380                                            Register scratch1,
3381                                            Register scratch2,
3382                                            Register scratch3,
3383                                            Label* gc_required) {
3384   // Calculate the number of bytes needed for the characters in the string
3385   // while observing object alignment.
3386   DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3387   dsll(scratch1, length, 1);  // Length in bytes, not chars.
3388   daddiu(scratch1, scratch1,
3389        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3390   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3391 
3392   // Allocate two-byte string in new space.
3393   Allocate(scratch1,
3394            result,
3395            scratch2,
3396            scratch3,
3397            gc_required,
3398            TAG_OBJECT);
3399 
3400   // Set the map, length and hash field.
3401   InitializeNewString(result,
3402                       length,
3403                       Heap::kStringMapRootIndex,
3404                       scratch1,
3405                       scratch2);
3406 }
3407 
3408 
3409 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3410                                            Register scratch1, Register scratch2,
3411                                            Register scratch3,
3412                                            Label* gc_required) {
3413   // Calculate the number of bytes needed for the characters in the string
3414   // while observing object alignment.
3415   DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3416   DCHECK(kCharSize == 1);
3417   daddiu(scratch1, length,
3418       kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3419   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3420 
3421   // Allocate one-byte string in new space.
3422   Allocate(scratch1,
3423            result,
3424            scratch2,
3425            scratch3,
3426            gc_required,
3427            TAG_OBJECT);
3428 
3429   // Set the map, length and hash field.
3430   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3431                       scratch1, scratch2);
3432 }
3433 
3434 
3435 void MacroAssembler::AllocateTwoByteConsString(Register result,
3436                                                Register length,
3437                                                Register scratch1,
3438                                                Register scratch2,
3439                                                Label* gc_required) {
3440   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3441            TAG_OBJECT);
3442   InitializeNewString(result,
3443                       length,
3444                       Heap::kConsStringMapRootIndex,
3445                       scratch1,
3446                       scratch2);
3447 }
3448 
3449 
3450 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3451                                                Register scratch1,
3452                                                Register scratch2,
3453                                                Label* gc_required) {
3454   Allocate(ConsString::kSize,
3455            result,
3456            scratch1,
3457            scratch2,
3458            gc_required,
3459            TAG_OBJECT);
3460 
3461   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3462                       scratch1, scratch2);
3463 }
3464 
3465 
3466 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3467                                                  Register length,
3468                                                  Register scratch1,
3469                                                  Register scratch2,
3470                                                  Label* gc_required) {
3471   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3472            TAG_OBJECT);
3473 
3474   InitializeNewString(result,
3475                       length,
3476                       Heap::kSlicedStringMapRootIndex,
3477                       scratch1,
3478                       scratch2);
3479 }
3480 
3481 
3482 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3483                                                  Register length,
3484                                                  Register scratch1,
3485                                                  Register scratch2,
3486                                                  Label* gc_required) {
3487   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3488            TAG_OBJECT);
3489 
3490   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3491                       scratch1, scratch2);
3492 }
3493 
3494 
3495 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3496                                                      Label* not_unique_name) {
3497   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
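  // A unique name is either an internalized string (both the not-string and
  // not-internalized bits clear) or a symbol; anything else is not unique.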
3498   Label succeed;
3499   And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3500   Branch(&succeed, eq, at, Operand(zero_reg));
3501   Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3502 
3503   bind(&succeed);
3504 }
3505 
3506 
3507 // Allocates a heap number or jumps to the label if the young space is full and
3508 // a scavenge is needed.
3509 void MacroAssembler::AllocateHeapNumber(Register result,
3510                                         Register scratch1,
3511                                         Register scratch2,
3512                                         Register heap_number_map,
3513                                         Label* need_gc,
3514                                         TaggingMode tagging_mode,
3515                                         MutableMode mode) {
3516   // Allocate an object in the heap for the heap number and tag it as a heap
3517   // object.
3518   Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3519            tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3520 
3521   Heap::RootListIndex map_index = mode == MUTABLE
3522       ? Heap::kMutableHeapNumberMapRootIndex
3523       : Heap::kHeapNumberMapRootIndex;
3524   AssertIsRoot(heap_number_map, map_index);
3525 
3526   // Store heap number map in the allocated object.
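  // When the result is untagged (NO_ALLOCATION_FLAGS), use a plain MemOperand;
  // FieldMemOperand would compensate for a heap object tag that is not there.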
3527   if (tagging_mode == TAG_RESULT) {
3528     sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3529   } else {
3530     sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3531   }
3532 }
3533 
3534 
3535 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3536                                                  FPURegister value,
3537                                                  Register scratch1,
3538                                                  Register scratch2,
3539                                                  Label* gc_required) {
3540   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3541   AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3542   sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3543 }
3544 
3545 
3546 // Copies a fixed number of fields of heap objects from src to dst.
3547 void MacroAssembler::CopyFields(Register dst,
3548                                 Register src,
3549                                 RegList temps,
3550                                 int field_count) {
3551   DCHECK((temps & dst.bit()) == 0);
3552   DCHECK((temps & src.bit()) == 0);
3553   // Primitive implementation using only one temporary register.
3554 
3555   Register tmp = no_reg;
3556   // Find a temp register in temps list.
3557   for (int i = 0; i < kNumRegisters; i++) {
3558     if ((temps & (1 << i)) != 0) {
3559       tmp.code_ = i;
3560       break;
3561     }
3562   }
3563   DCHECK(!tmp.is(no_reg));
3564 
3565   for (int i = 0; i < field_count; i++) {
3566     ld(tmp, FieldMemOperand(src, i * kPointerSize));
3567     sd(tmp, FieldMemOperand(dst, i * kPointerSize));
3568   }
3569 }
3570 
3571 
3572 void MacroAssembler::CopyBytes(Register src,
3573                                Register dst,
3574                                Register length,
3575                                Register scratch) {
3576   Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3577 
3578   // Align src before copying in word size chunks.
3579   Branch(&byte_loop, le, length, Operand(kPointerSize));
3580   bind(&align_loop_1);
3581   And(scratch, src, kPointerSize - 1);
3582   Branch(&word_loop, eq, scratch, Operand(zero_reg));
3583   lbu(scratch, MemOperand(src));
3584   Daddu(src, src, 1);
3585   sb(scratch, MemOperand(dst));
3586   Daddu(dst, dst, 1);
3587   Dsubu(length, length, Operand(1));
3588   Branch(&align_loop_1, ne, length, Operand(zero_reg));
3589 
3590   // Copy bytes in word size chunks.
3591   bind(&word_loop);
3592   if (emit_debug_code()) {
3593     And(scratch, src, kPointerSize - 1);
3594     Assert(eq, kExpectingAlignmentForCopyBytes,
3595         scratch, Operand(zero_reg));
3596   }
3597   Branch(&byte_loop, lt, length, Operand(kPointerSize));
3598   ld(scratch, MemOperand(src));
3599   Daddu(src, src, kPointerSize);
3600 
3601   // TODO(kalmard) check if this can be optimized to use sw in most cases.
3602   // Can't use unaligned access - copy byte by byte.
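  // Note: the bytes are written least-significant first, so this sequence
  // reproduces the loaded word's layout on little-endian targets.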
3603   sb(scratch, MemOperand(dst, 0));
3604   dsrl(scratch, scratch, 8);
3605   sb(scratch, MemOperand(dst, 1));
3606   dsrl(scratch, scratch, 8);
3607   sb(scratch, MemOperand(dst, 2));
3608   dsrl(scratch, scratch, 8);
3609   sb(scratch, MemOperand(dst, 3));
3610   dsrl(scratch, scratch, 8);
3611   sb(scratch, MemOperand(dst, 4));
3612   dsrl(scratch, scratch, 8);
3613   sb(scratch, MemOperand(dst, 5));
3614   dsrl(scratch, scratch, 8);
3615   sb(scratch, MemOperand(dst, 6));
3616   dsrl(scratch, scratch, 8);
3617   sb(scratch, MemOperand(dst, 7));
3618   Daddu(dst, dst, 8);
3619 
3620   Dsubu(length, length, Operand(kPointerSize));
3621   Branch(&word_loop);
3622 
3623   // Copy the last bytes if any left.
3624   bind(&byte_loop);
3625   Branch(&done, eq, length, Operand(zero_reg));
3626   bind(&byte_loop_1);
3627   lbu(scratch, MemOperand(src));
3628   Daddu(src, src, 1);
3629   sb(scratch, MemOperand(dst));
3630   Daddu(dst, dst, 1);
3631   Dsubu(length, length, Operand(1));
3632   Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3633   bind(&done);
3634 }
3635 
3636 
3637 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3638                                                 Register end_offset,
3639                                                 Register filler) {
3640   Label loop, entry;
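  // Jump to the loop condition first so that an empty range
  // (start_offset >= end_offset) stores nothing.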
3641   Branch(&entry);
3642   bind(&loop);
3643   sd(filler, MemOperand(start_offset));
3644   Daddu(start_offset, start_offset, kPointerSize);
3645   bind(&entry);
3646   Branch(&loop, lt, start_offset, Operand(end_offset));
3647 }
3648 
3649 
3650 void MacroAssembler::CheckFastElements(Register map,
3651                                        Register scratch,
3652                                        Label* fail) {
3653   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3654   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3655   STATIC_ASSERT(FAST_ELEMENTS == 2);
3656   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3657   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3658   Branch(fail, hi, scratch,
3659          Operand(Map::kMaximumBitField2FastHoleyElementValue));
3660 }
3661 
3662 
3663 void MacroAssembler::CheckFastObjectElements(Register map,
3664                                              Register scratch,
3665                                              Label* fail) {
3666   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3667   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3668   STATIC_ASSERT(FAST_ELEMENTS == 2);
3669   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
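  // Only FAST_ELEMENTS and FAST_HOLEY_ELEMENTS may pass: smi-only kinds fail
  // on the first branch, anything beyond fast holey elements on the second.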
3670   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3671   Branch(fail, ls, scratch,
3672          Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3673   Branch(fail, hi, scratch,
3674          Operand(Map::kMaximumBitField2FastHoleyElementValue));
3675 }
3676 
3677 
3678 void MacroAssembler::CheckFastSmiElements(Register map,
3679                                           Register scratch,
3680                                           Label* fail) {
3681   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3682   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3683   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3684   Branch(fail, hi, scratch,
3685          Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3686 }
3687 
3688 
3689 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3690                                                  Register key_reg,
3691                                                  Register elements_reg,
3692                                                  Register scratch1,
3693                                                  Register scratch2,
3694                                                  Register scratch3,
3695                                                  Label* fail,
3696                                                  int elements_offset) {
3697   Label smi_value, maybe_nan, have_double_value, is_nan, done;
3698   Register mantissa_reg = scratch2;
3699   Register exponent_reg = scratch3;
3700 
3701   // Handle smi values specially.
3702   JumpIfSmi(value_reg, &smi_value);
3703 
3704   // Ensure that the object is a heap number
3705   CheckMap(value_reg,
3706            scratch1,
3707            Heap::kHeapNumberMapRootIndex,
3708            fail,
3709            DONT_DO_SMI_CHECK);
3710 
3711   // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
3712   // in the exponent.
3713   li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3714   lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3715   Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3716 
3717   lwu(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3718 
3719   bind(&have_double_value);
3720   // dsll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
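  // The smi payload of the key lives in the upper 32 bits, so shifting right
  // by (32 - kDoubleSizeLog2) both untags the key and scales it to a byte
  // offset of kDoubleSize per element.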
3721   dsra(scratch1, key_reg, 32 - kDoubleSizeLog2);
3722   Daddu(scratch1, scratch1, elements_reg);
3723   sw(mantissa_reg, FieldMemOperand(
3724      scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
3725   uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
3726       sizeof(kHoleNanLower32);
3727   sw(exponent_reg, FieldMemOperand(scratch1, offset));
3728   jmp(&done);
3729 
3730   bind(&maybe_nan);
3731   // Could be NaN, Infinity or -Infinity. If fraction is not zero, it's NaN,
3732   // otherwise it's Infinity or -Infinity, and the non-NaN code path applies.
3733   lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3734   Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3735   bind(&is_nan);
3736   // Load canonical NaN for storing into the double array.
3737   LoadRoot(at, Heap::kNanValueRootIndex);
3738   lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3739   lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3740   jmp(&have_double_value);
3741 
3742   bind(&smi_value);
3743   Daddu(scratch1, elements_reg,
3744       Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3745               elements_offset));
3746   // dsll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3747   dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
3748   Daddu(scratch1, scratch1, scratch2);
3749   // scratch1 is now the effective address of the double element.
3750 
3751   Register untagged_value = elements_reg;
3752   SmiUntag(untagged_value, value_reg);
3753   mtc1(untagged_value, f2);
3754   cvt_d_w(f0, f2);
3755   sdc1(f0, MemOperand(scratch1, 0));
3756   bind(&done);
3757 }
3758 
3759 
3760 void MacroAssembler::CompareMapAndBranch(Register obj,
3761                                          Register scratch,
3762                                          Handle<Map> map,
3763                                          Label* early_success,
3764                                          Condition cond,
3765                                          Label* branch_to) {
3766   ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3767   CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3768 }
3769 
3770 
3771 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3772                                          Handle<Map> map,
3773                                          Label* early_success,
3774                                          Condition cond,
3775                                          Label* branch_to) {
3776   Branch(branch_to, cond, obj_map, Operand(map));
3777 }
3778 
3779 
3780 void MacroAssembler::CheckMap(Register obj,
3781                               Register scratch,
3782                               Handle<Map> map,
3783                               Label* fail,
3784                               SmiCheckType smi_check_type) {
3785   if (smi_check_type == DO_SMI_CHECK) {
3786     JumpIfSmi(obj, fail);
3787   }
3788   Label success;
3789   CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3790   bind(&success);
3791 }
3792 
3793 
3794 void MacroAssembler::DispatchMap(Register obj,
3795                                  Register scratch,
3796                                  Handle<Map> map,
3797                                  Handle<Code> success,
3798                                  SmiCheckType smi_check_type) {
3799   Label fail;
3800   if (smi_check_type == DO_SMI_CHECK) {
3801     JumpIfSmi(obj, &fail);
3802   }
3803   ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3804   Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3805   bind(&fail);
3806 }
3807 
3808 
3809 void MacroAssembler::CheckMap(Register obj,
3810                               Register scratch,
3811                               Heap::RootListIndex index,
3812                               Label* fail,
3813                               SmiCheckType smi_check_type) {
3814   if (smi_check_type == DO_SMI_CHECK) {
3815     JumpIfSmi(obj, fail);
3816   }
3817   ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3818   LoadRoot(at, index);
3819   Branch(fail, ne, scratch, Operand(at));
3820 }
3821 
3822 
3823 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
3824   if (IsMipsSoftFloatABI) {
3825     Move(dst, v0, v1);
3826   } else {
3827     Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
3828   }
3829 }
3830 
3831 
3832 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
3833   if (IsMipsSoftFloatABI) {
3834     Move(dst, a0, a1);
3835   } else {
3836     Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
3837   }
3838 }
3839 
3840 
3841 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3842   if (!IsMipsSoftFloatABI) {
3843     Move(f12, src);
3844   } else {
3845     Move(a0, a1, src);
3846   }
3847 }
3848 
3849 
3850 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3851   if (!IsMipsSoftFloatABI) {
3852     Move(f0, src);
3853   } else {
3854     Move(v0, v1, src);
3855   }
3856 }
3857 
3858 
3859 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3860                                           DoubleRegister src2) {
3861   if (!IsMipsSoftFloatABI) {
3862     const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
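    // If src2 already lives in f12, move it out first so it is not clobbered
    // when f12 is loaded with src1.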
3863     if (src2.is(f12)) {
3864       DCHECK(!src1.is(fparg2));
3865       Move(fparg2, src2);
3866       Move(f12, src1);
3867     } else {
3868       Move(f12, src1);
3869       Move(fparg2, src2);
3870     }
3871   } else {
3872     Move(a0, a1, src1);
3873     Move(a2, a3, src2);
3874   }
3875 }
3876 
3877 
3878 // -----------------------------------------------------------------------------
3879 // JavaScript invokes.
3880 
3881 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3882                                     const ParameterCount& actual,
3883                                     Handle<Code> code_constant,
3884                                     Register code_reg,
3885                                     Label* done,
3886                                     bool* definitely_mismatches,
3887                                     InvokeFlag flag,
3888                                     const CallWrapper& call_wrapper) {
3889   bool definitely_matches = false;
3890   *definitely_mismatches = false;
3891   Label regular_invoke;
3892 
3893   // Check whether the expected and actual arguments count match. If not,
3894   // set up registers according to the contract with ArgumentsAdaptorTrampoline:
3895   //  a0: actual arguments count
3896   //  a1: function (passed through to callee)
3897   //  a2: expected arguments count
3898 
3899   // The code below is made a lot easier because the calling code already sets
3900   // up actual and expected registers according to the contract if values are
3901   // passed in registers.
3902   DCHECK(actual.is_immediate() || actual.reg().is(a0));
3903   DCHECK(expected.is_immediate() || expected.reg().is(a2));
3904   DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3905 
3906   if (expected.is_immediate()) {
3907     DCHECK(actual.is_immediate());
3908     if (expected.immediate() == actual.immediate()) {
3909       definitely_matches = true;
3910     } else {
3911       li(a0, Operand(actual.immediate()));
3912       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3913       if (expected.immediate() == sentinel) {
3914         // Don't worry about adapting arguments for builtins that
3915         // don't want that done. Skip the adaptation code by making it look
3916         // like we have a match between expected and actual number of
3917         // arguments.
3918         definitely_matches = true;
3919       } else {
3920         *definitely_mismatches = true;
3921         li(a2, Operand(expected.immediate()));
3922       }
3923     }
3924   } else if (actual.is_immediate()) {
3925     Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3926     li(a0, Operand(actual.immediate()));
3927   } else {
3928     Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3929   }
3930 
3931   if (!definitely_matches) {
3932     if (!code_constant.is_null()) {
3933       li(a3, Operand(code_constant));
3934       daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3935     }
3936 
3937     Handle<Code> adaptor =
3938         isolate()->builtins()->ArgumentsAdaptorTrampoline();
3939     if (flag == CALL_FUNCTION) {
3940       call_wrapper.BeforeCall(CallSize(adaptor));
3941       Call(adaptor);
3942       call_wrapper.AfterCall();
3943       if (!*definitely_mismatches) {
3944         Branch(done);
3945       }
3946     } else {
3947       Jump(adaptor, RelocInfo::CODE_TARGET);
3948     }
3949     bind(&regular_invoke);
3950   }
3951 }
3952 
3953 
3954 void MacroAssembler::InvokeCode(Register code,
3955                                 const ParameterCount& expected,
3956                                 const ParameterCount& actual,
3957                                 InvokeFlag flag,
3958                                 const CallWrapper& call_wrapper) {
3959   // You can't call a function without a valid frame.
3960   DCHECK(flag == JUMP_FUNCTION || has_frame());
3961 
3962   Label done;
3963 
3964   bool definitely_mismatches = false;
3965   InvokePrologue(expected, actual, Handle<Code>::null(), code,
3966                  &done, &definitely_mismatches, flag,
3967                  call_wrapper);
3968   if (!definitely_mismatches) {
3969     if (flag == CALL_FUNCTION) {
3970       call_wrapper.BeforeCall(CallSize(code));
3971       Call(code);
3972       call_wrapper.AfterCall();
3973     } else {
3974       DCHECK(flag == JUMP_FUNCTION);
3975       Jump(code);
3976     }
3977     // Continue here if InvokePrologue does handle the invocation due to
3978     // mismatched parameter counts.
3979     bind(&done);
3980   }
3981 }
3982 
3983 
3984 void MacroAssembler::InvokeFunction(Register function,
3985                                     const ParameterCount& actual,
3986                                     InvokeFlag flag,
3987                                     const CallWrapper& call_wrapper) {
3988   // You can't call a function without a valid frame.
3989   DCHECK(flag == JUMP_FUNCTION || has_frame());
3990 
3991   // Contract with called JS functions requires that function is passed in a1.
3992   DCHECK(function.is(a1));
3993   Register expected_reg = a2;
3994   Register code_reg = a3;
3995   ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3996   ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3997   // The argument count is stored as int32_t on 64-bit platforms.
3998   // TODO(plind): Smi on 32-bit platforms.
3999   lw(expected_reg,
4000       FieldMemOperand(code_reg,
4001                       SharedFunctionInfo::kFormalParameterCountOffset));
4002   ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4003   ParameterCount expected(expected_reg);
4004   InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4005 }
4006 
4007 
4008 void MacroAssembler::InvokeFunction(Register function,
4009                                     const ParameterCount& expected,
4010                                     const ParameterCount& actual,
4011                                     InvokeFlag flag,
4012                                     const CallWrapper& call_wrapper) {
4013   // You can't call a function without a valid frame.
4014   DCHECK(flag == JUMP_FUNCTION || has_frame());
4015 
4016   // Contract with called JS functions requires that function is passed in a1.
4017   DCHECK(function.is(a1));
4018 
4019   // Get the function and setup the context.
4020   ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4021 
4022   // We call indirectly through the code field in the function to
4023   // allow recompilation to take effect without changing any of the
4024   // call sites.
4025   ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4026   InvokeCode(a3, expected, actual, flag, call_wrapper);
4027 }
4028 
4029 
4030 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4031                                     const ParameterCount& expected,
4032                                     const ParameterCount& actual,
4033                                     InvokeFlag flag,
4034                                     const CallWrapper& call_wrapper) {
4035   li(a1, function);
4036   InvokeFunction(a1, expected, actual, flag, call_wrapper);
4037 }
4038 
4039 
4040 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
4041                                           Register map,
4042                                           Register scratch,
4043                                           Label* fail) {
4044   ld(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
4045   IsInstanceJSObjectType(map, scratch, fail);
4046 }
4047 
4048 
4049 void MacroAssembler::IsInstanceJSObjectType(Register map,
4050                                             Register scratch,
4051                                             Label* fail) {
4052   lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
4053   Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4054   Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4055 }
4056 
4057 
4058 void MacroAssembler::IsObjectJSStringType(Register object,
4059                                           Register scratch,
4060                                           Label* fail) {
4061   DCHECK(kNotStringTag != 0);
4062 
4063   ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4064   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4065   And(scratch, scratch, Operand(kIsNotStringMask));
4066   Branch(fail, ne, scratch, Operand(zero_reg));
4067 }
4068 
4069 
4070 void MacroAssembler::IsObjectNameType(Register object,
4071                                       Register scratch,
4072                                       Label* fail) {
4073   ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4074   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4075   Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4076 }
4077 
4078 
4079 // ---------------------------------------------------------------------------
4080 // Support functions.
4081 
4082 
4083 void MacroAssembler::TryGetFunctionPrototype(Register function,
4084                                              Register result,
4085                                              Register scratch,
4086                                              Label* miss,
4087                                              bool miss_on_bound_function) {
4088   Label non_instance;
4089   if (miss_on_bound_function) {
4090     // Check that the receiver isn't a smi.
4091     JumpIfSmi(function, miss);
4092 
4093     // Check that the function really is a function.  Load map into result reg.
4094     GetObjectType(function, result, scratch);
4095     Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
4096 
4097     ld(scratch,
4098        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
4099     lwu(scratch,
4100         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
4101     And(scratch, scratch,
4102         Operand(1 << SharedFunctionInfo::kBoundFunction));
4103     Branch(miss, ne, scratch, Operand(zero_reg));
4104 
4105     // Make sure that the function has an instance prototype.
4106     lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
4107     And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
4108     Branch(&non_instance, ne, scratch, Operand(zero_reg));
4109   }
4110 
4111   // Get the prototype or initial map from the function.
4112   ld(result,
4113      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4114 
4115   // If the prototype or initial map is the hole, don't return it and
4116   // simply miss the cache instead. This will allow us to allocate a
4117   // prototype object on-demand in the runtime system.
4118   LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4119   Branch(miss, eq, result, Operand(t8));
4120 
4121   // If the function does not have an initial map, we're done.
4122   Label done;
4123   GetObjectType(result, scratch, scratch);
4124   Branch(&done, ne, scratch, Operand(MAP_TYPE));
4125 
4126   // Get the prototype from the initial map.
4127   ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
4128 
4129   if (miss_on_bound_function) {
4130     jmp(&done);
4131 
4132     // Non-instance prototype: Fetch prototype from constructor field
4133     // in initial map.
4134     bind(&non_instance);
4135     ld(result, FieldMemOperand(result, Map::kConstructorOffset));
4136   }
4137 
4138   // All done.
4139   bind(&done);
4140 }
4141 
4142 
4143 void MacroAssembler::GetObjectType(Register object,
4144                                    Register map,
4145                                    Register type_reg) {
4146   ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
4147   lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4148 }
4149 
4150 
4151 // -----------------------------------------------------------------------------
4152 // Runtime calls.
4153 
4154 void MacroAssembler::CallStub(CodeStub* stub,
4155                               TypeFeedbackId ast_id,
4156                               Condition cond,
4157                               Register r1,
4158                               const Operand& r2,
4159                               BranchDelaySlot bd) {
4160   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
4161   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4162        cond, r1, r2, bd);
4163 }
4164 
4165 
4166 void MacroAssembler::TailCallStub(CodeStub* stub,
4167                                   Condition cond,
4168                                   Register r1,
4169                                   const Operand& r2,
4170                                   BranchDelaySlot bd) {
4171   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4172 }
4173 
4174 
4175 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4176   int64_t offset = (ref0.address() - ref1.address());
4177   DCHECK(static_cast<int>(offset) == offset);
4178   return static_cast<int>(offset);
4179 }
4180 
4181 
4182 void MacroAssembler::CallApiFunctionAndReturn(
4183     Register function_address,
4184     ExternalReference thunk_ref,
4185     int stack_space,
4186     MemOperand return_value_operand,
4187     MemOperand* context_restore_operand) {
4188   ExternalReference next_address =
4189       ExternalReference::handle_scope_next_address(isolate());
4190   const int kNextOffset = 0;
4191   const int kLimitOffset = AddressOffset(
4192       ExternalReference::handle_scope_limit_address(isolate()),
4193       next_address);
4194   const int kLevelOffset = AddressOffset(
4195       ExternalReference::handle_scope_level_address(isolate()),
4196       next_address);
4197 
4198   DCHECK(function_address.is(a1) || function_address.is(a2));
4199 
4200   Label profiler_disabled;
4201   Label end_profiler_check;
4202   li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
4203   lb(t9, MemOperand(t9, 0));
4204   Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
4205 
4206   // Additional parameter is the address of the actual callback.
4207   li(t9, Operand(thunk_ref));
4208   jmp(&end_profiler_check);
4209 
4210   bind(&profiler_disabled);
4211   mov(t9, function_address);
4212   bind(&end_profiler_check);
4213 
4214   // Allocate HandleScope in callee-save registers.
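  // s0 holds the previous next field, s1 the limit and s2 the level; being
  // callee-saved, they survive the call to the API function below.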
4215   li(s3, Operand(next_address));
4216   ld(s0, MemOperand(s3, kNextOffset));
4217   ld(s1, MemOperand(s3, kLimitOffset));
4218   ld(s2, MemOperand(s3, kLevelOffset));
4219   Daddu(s2, s2, Operand(1));
4220   sd(s2, MemOperand(s3, kLevelOffset));
4221 
4222   if (FLAG_log_timer_events) {
4223     FrameScope frame(this, StackFrame::MANUAL);
4224     PushSafepointRegisters();
4225     PrepareCallCFunction(1, a0);
4226     li(a0, Operand(ExternalReference::isolate_address(isolate())));
4227     CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
4228     PopSafepointRegisters();
4229   }
4230 
4231   // Native call returns to the DirectCEntry stub which redirects to the
4232   // return address pushed on stack (could have moved after GC).
4233   // DirectCEntry stub itself is generated early and never moves.
4234   DirectCEntryStub stub(isolate());
4235   stub.GenerateCall(this, t9);
4236 
4237   if (FLAG_log_timer_events) {
4238     FrameScope frame(this, StackFrame::MANUAL);
4239     PushSafepointRegisters();
4240     PrepareCallCFunction(1, a0);
4241     li(a0, Operand(ExternalReference::isolate_address(isolate())));
4242     CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
4243     PopSafepointRegisters();
4244   }
4245 
4246   Label promote_scheduled_exception;
4247   Label exception_handled;
4248   Label delete_allocated_handles;
4249   Label leave_exit_frame;
4250   Label return_value_loaded;
4251 
4252   // Load value from ReturnValue.
4253   ld(v0, return_value_operand);
4254   bind(&return_value_loaded);
4255 
4256   // No more valid handles (the result handle was the last one). Restore
4257   // previous handle scope.
4258   sd(s0, MemOperand(s3, kNextOffset));
4259   if (emit_debug_code()) {
4260     ld(a1, MemOperand(s3, kLevelOffset));
4261     Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
4262   }
4263   Dsubu(s2, s2, Operand(1));
4264   sd(s2, MemOperand(s3, kLevelOffset));
4265   ld(at, MemOperand(s3, kLimitOffset));
4266   Branch(&delete_allocated_handles, ne, s1, Operand(at));
4267 
4268   // Check if the function scheduled an exception.
4269   bind(&leave_exit_frame);
4270   LoadRoot(a4, Heap::kTheHoleValueRootIndex);
4271   li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
4272   ld(a5, MemOperand(at));
4273   Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
4274   bind(&exception_handled);
4275 
4276   bool restore_context = context_restore_operand != NULL;
4277   if (restore_context) {
4278     ld(cp, *context_restore_operand);
4279   }
4280   li(s0, Operand(stack_space));
4281   LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
4282 
4283   bind(&promote_scheduled_exception);
4284   {
4285     FrameScope frame(this, StackFrame::INTERNAL);
4286     CallExternalReference(
4287         ExternalReference(Runtime::kPromoteScheduledException, isolate()),
4288         0);
4289   }
4290   jmp(&exception_handled);
4291 
4292   // HandleScope limit has changed. Delete allocated extensions.
4293   bind(&delete_allocated_handles);
4294   sd(s1, MemOperand(s3, kLimitOffset));
4295   mov(s0, v0);
4296   mov(a0, v0);
4297   PrepareCallCFunction(1, s1);
4298   li(a0, Operand(ExternalReference::isolate_address(isolate())));
4299   CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4300       1);
4301   mov(v0, s0);
4302   jmp(&leave_exit_frame);
4303 }
4304 
4305 
4306 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4307   return has_frame_ || !stub->SometimesSetsUpAFrame();
4308 }
4309 
4310 
4311 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4312   // If the hash field contains an array index, pick it out. The assert checks
4313   // that the constants for the maximum number of digits for an array index
4314   // cached in the hash field and the number of bits reserved for it do not
4315   // conflict.
4316   DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4317          (1 << String::kArrayIndexValueBits));
4318   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4319 }
4320 
4321 
4322 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4323                                                FPURegister result,
4324                                                Register scratch1,
4325                                                Register scratch2,
4326                                                Register heap_number_map,
4327                                                Label* not_number,
4328                                                ObjectToDoubleFlags flags) {
4329   Label done;
4330   if ((flags & OBJECT_NOT_SMI) == 0) {
4331     Label not_smi;
4332     JumpIfNotSmi(object, &not_smi);
4333     // Remove smi tag and convert to double.
4334     // dsra(scratch1, object, kSmiTagSize);
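    // The smi payload occupies the upper 32 bits; dsra32 shifts right by 32
    // to untag it before the integer-to-double conversion.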
4335     dsra32(scratch1, object, 0);
4336     mtc1(scratch1, result);
4337     cvt_d_w(result, result);
4338     Branch(&done);
4339     bind(&not_smi);
4340   }
4341   // Check for heap number and load double value from it.
4342   ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4343   Branch(not_number, ne, scratch1, Operand(heap_number_map));
4344 
4345   if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4346     // If exponent is all ones the number is either a NaN or +/-Infinity.
4347     Register exponent = scratch1;
4348     Register mask_reg = scratch2;
4349     lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4350     li(mask_reg, HeapNumber::kExponentMask);
4351 
4352     And(exponent, exponent, mask_reg);
4353     Branch(not_number, eq, exponent, Operand(mask_reg));
4354   }
4355   ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4356   bind(&done);
4357 }
4358 
4359 
4360 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4361                                             FPURegister value,
4362                                             Register scratch1) {
4363   // dsra(scratch1, smi, kSmiTagSize);
4364   dsra32(scratch1, smi, 0);
4365   mtc1(scratch1, value);
4366   cvt_d_w(value, value);
4367 }
4368 
4369 
4370 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4371                                              Register left,
4372                                              Register right,
4373                                              Register overflow_dst,
4374                                              Register scratch) {
4375   DCHECK(!dst.is(overflow_dst));
4376   DCHECK(!dst.is(scratch));
4377   DCHECK(!overflow_dst.is(scratch));
4378   DCHECK(!overflow_dst.is(left));
4379   DCHECK(!overflow_dst.is(right));
4380 
4381   if (left.is(right) && dst.is(left)) {
4382     DCHECK(!dst.is(t9));
4383     DCHECK(!scratch.is(t9));
4384     DCHECK(!left.is(t9));
4385     DCHECK(!right.is(t9));
4386     DCHECK(!overflow_dst.is(t9));
4387     mov(t9, right);
4388     right = t9;
4389   }
4390 
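  // Signed overflow occurred iff left and right have the same sign but the
  // sum's sign differs, i.e. ((dst ^ left) & (dst ^ right)) ends up negative
  // exactly when the addition overflowed.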
4391   if (dst.is(left)) {
4392     mov(scratch, left);  // Preserve left.
4393     daddu(dst, left, right);  // Left is overwritten.
4394     xor_(scratch, dst, scratch);  // Original left.
4395     xor_(overflow_dst, dst, right);
4396     and_(overflow_dst, overflow_dst, scratch);
4397   } else if (dst.is(right)) {
4398     mov(scratch, right);  // Preserve right.
4399     daddu(dst, left, right);  // Right is overwritten.
4400     xor_(scratch, dst, scratch);  // Original right.
4401     xor_(overflow_dst, dst, left);
4402     and_(overflow_dst, overflow_dst, scratch);
4403   } else {
4404     daddu(dst, left, right);
4405     xor_(overflow_dst, dst, left);
4406     xor_(scratch, dst, right);
4407     and_(overflow_dst, scratch, overflow_dst);
4408   }
4409 }
4410 
4411 
4412 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4413                                              Register left,
4414                                              Register right,
4415                                              Register overflow_dst,
4416                                              Register scratch) {
4417   DCHECK(!dst.is(overflow_dst));
4418   DCHECK(!dst.is(scratch));
4419   DCHECK(!overflow_dst.is(scratch));
4420   DCHECK(!overflow_dst.is(left));
4421   DCHECK(!overflow_dst.is(right));
4422   DCHECK(!scratch.is(left));
4423   DCHECK(!scratch.is(right));
4424 
4425   // This happens with some crankshaft code. Since Subu works fine if
4426   // left == right, let's not make that restriction here.
4427   if (left.is(right)) {
4428     mov(dst, zero_reg);
4429     mov(overflow_dst, zero_reg);
4430     return;
4431   }
4432 
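  // For subtraction, overflow occurred iff left and right differ in sign and
  // the result's sign differs from left's: ((dst ^ left) & (left ^ right))
  // ends up negative exactly when left - right overflowed.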
4433   if (dst.is(left)) {
4434     mov(scratch, left);  // Preserve left.
4435     dsubu(dst, left, right);  // Left is overwritten.
4436     xor_(overflow_dst, dst, scratch);  // scratch is original left.
4437     xor_(scratch, scratch, right);  // scratch is original left.
4438     and_(overflow_dst, scratch, overflow_dst);
4439   } else if (dst.is(right)) {
4440     mov(scratch, right);  // Preserve right.
4441     dsubu(dst, left, right);  // Right is overwritten.
4442     xor_(overflow_dst, dst, left);
4443     xor_(scratch, left, scratch);  // Original right.
4444     and_(overflow_dst, scratch, overflow_dst);
4445   } else {
4446     dsubu(dst, left, right);
4447     xor_(overflow_dst, dst, left);
4448     xor_(scratch, left, right);
4449     and_(overflow_dst, scratch, overflow_dst);
4450   }
4451 }
4452 
4453 
4454 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4455                                  int num_arguments,
4456                                  SaveFPRegsMode save_doubles) {
4457   // All parameters are on the stack. v0 has the return value after call.
4458 
4459   // If the expected number of arguments of the runtime function is
4460   // constant, we check that the actual number of arguments match the
4461   // expectation.
4462   CHECK(f->nargs < 0 || f->nargs == num_arguments);
4463 
4464   // TODO(1236192): Most runtime routines don't need the number of
4465   // arguments passed in because it is constant. At some point we
4466   // should remove this need and make the runtime routine entry code
4467   // smarter.
4468   PrepareCEntryArgs(num_arguments);
4469   PrepareCEntryFunction(ExternalReference(f, isolate()));
4470   CEntryStub stub(isolate(), 1, save_doubles);
4471   CallStub(&stub);
4472 }
4473 
4474 
4475 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4476                                            int num_arguments,
4477                                            BranchDelaySlot bd) {
4478   PrepareCEntryArgs(num_arguments);
4479   PrepareCEntryFunction(ext);
4480 
4481   CEntryStub stub(isolate(), 1);
4482   CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4483 }
4484 
4485 
4486 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4487                                                int num_arguments,
4488                                                int result_size) {
4489   // TODO(1236192): Most runtime routines don't need the number of
4490   // arguments passed in because it is constant. At some point we
4491   // should remove this need and make the runtime routine entry code
4492   // smarter.
4493   PrepareCEntryArgs(num_arguments);
4494   JumpToExternalReference(ext);
4495 }
4496 
4497 
4498 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4499                                      int num_arguments,
4500                                      int result_size) {
4501   TailCallExternalReference(ExternalReference(fid, isolate()),
4502                             num_arguments,
4503                             result_size);
4504 }
4505 
4506 
JumpToExternalReference(const ExternalReference & builtin,BranchDelaySlot bd)4507 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4508                                              BranchDelaySlot bd) {
4509   PrepareCEntryFunction(builtin);
4510   CEntryStub stub(isolate(), 1);
4511   Jump(stub.GetCode(),
4512        RelocInfo::CODE_TARGET,
4513        al,
4514        zero_reg,
4515        Operand(zero_reg),
4516        bd);
4517 }
4518 
4519 
InvokeBuiltin(Builtins::JavaScript id,InvokeFlag flag,const CallWrapper & call_wrapper)4520 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4521                                    InvokeFlag flag,
4522                                    const CallWrapper& call_wrapper) {
4523   // You can't call a builtin without a valid frame.
4524   DCHECK(flag == JUMP_FUNCTION || has_frame());
4525 
4526   GetBuiltinEntry(t9, id);
4527   if (flag == CALL_FUNCTION) {
4528     call_wrapper.BeforeCall(CallSize(t9));
4529     Call(t9);
4530     call_wrapper.AfterCall();
4531   } else {
4532     DCHECK(flag == JUMP_FUNCTION);
4533     Jump(t9);
4534   }
4535 }
4536 
4537 
GetBuiltinFunction(Register target,Builtins::JavaScript id)4538 void MacroAssembler::GetBuiltinFunction(Register target,
4539                                         Builtins::JavaScript id) {
4540   // Load the builtins object into target register.
4541   ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4542   ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4543   // Load the JavaScript builtin function from the builtins object.
4544   ld(target, FieldMemOperand(target,
4545                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4546 }
4547 
4548 
GetBuiltinEntry(Register target,Builtins::JavaScript id)4549 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4550   DCHECK(!target.is(a1));
4551   GetBuiltinFunction(a1, id);
4552   // Load the code entry point from the builtins object.
4553   ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4554 }
4555 
4556 
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)4557 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4558                                 Register scratch1, Register scratch2) {
4559   if (FLAG_native_code_counters && counter->Enabled()) {
4560     li(scratch1, Operand(value));
4561     li(scratch2, Operand(ExternalReference(counter)));
4562     sd(scratch1, MemOperand(scratch2));
4563   }
4564 }
4565 
4566 
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)4567 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4568                                       Register scratch1, Register scratch2) {
4569   DCHECK(value > 0);
4570   if (FLAG_native_code_counters && counter->Enabled()) {
4571     li(scratch2, Operand(ExternalReference(counter)));
4572     ld(scratch1, MemOperand(scratch2));
4573     Daddu(scratch1, scratch1, Operand(value));
4574     sd(scratch1, MemOperand(scratch2));
4575   }
4576 }
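// Example from later in this file: LookupNumberStringCache() bumps a counter
// with IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
// scratch1, scratch2).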
4577 
4578 
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)4579 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4580                                       Register scratch1, Register scratch2) {
4581   DCHECK(value > 0);
4582   if (FLAG_native_code_counters && counter->Enabled()) {
4583     li(scratch2, Operand(ExternalReference(counter)));
4584     ld(scratch1, MemOperand(scratch2));
4585     Dsubu(scratch1, scratch1, Operand(value));
4586     sd(scratch1, MemOperand(scratch2));
4587   }
4588 }
4589 
4590 
4591 // -----------------------------------------------------------------------------
4592 // Debugging.
4593 
Assert(Condition cc,BailoutReason reason,Register rs,Operand rt)4594 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4595                             Register rs, Operand rt) {
4596   if (emit_debug_code())
4597     Check(cc, reason, rs, rt);
4598 }
4599 
4600 
AssertFastElements(Register elements)4601 void MacroAssembler::AssertFastElements(Register elements) {
4602   if (emit_debug_code()) {
4603     DCHECK(!elements.is(at));
4604     Label ok;
4605     push(elements);
4606     ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4607     LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4608     Branch(&ok, eq, elements, Operand(at));
4609     LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4610     Branch(&ok, eq, elements, Operand(at));
4611     LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4612     Branch(&ok, eq, elements, Operand(at));
4613     Abort(kJSObjectWithFastElementsMapHasSlowElements);
4614     bind(&ok);
4615     pop(elements);
4616   }
4617 }
4618 
4619 
Check(Condition cc,BailoutReason reason,Register rs,Operand rt)4620 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4621                            Register rs, Operand rt) {
4622   Label L;
4623   Branch(&L, cc, rs, rt);
4624   Abort(reason);
4625   // Will not return here.
4626   bind(&L);
4627 }
4628 
4629 
Abort(BailoutReason reason)4630 void MacroAssembler::Abort(BailoutReason reason) {
4631   Label abort_start;
4632   bind(&abort_start);
4633 #ifdef DEBUG
4634   const char* msg = GetBailoutReason(reason);
4635   if (msg != NULL) {
4636     RecordComment("Abort message: ");
4637     RecordComment(msg);
4638   }
4639 
4640   if (FLAG_trap_on_abort) {
4641     stop(msg);
4642     return;
4643   }
4644 #endif
4645 
4646   li(a0, Operand(Smi::FromInt(reason)));
4647   push(a0);
4648   // Disable stub call restrictions to always allow calls to abort.
4649   if (!has_frame_) {
4650     // We don't actually want to generate a pile of code for this, so just
4651     // claim there is a stack frame, without generating one.
4652     FrameScope scope(this, StackFrame::NONE);
4653     CallRuntime(Runtime::kAbort, 1);
4654   } else {
4655     CallRuntime(Runtime::kAbort, 1);
4656   }
4657   // Will not return here.
4658   if (is_trampoline_pool_blocked()) {
4659     // If the calling code cares about the exact number of
4660     // instructions generated, we insert padding here to keep the size
4661     // of the Abort macro constant.
4662     // Currently in debug mode with debug_code enabled the number of
4663     // generated instructions is 10, so we use this as a maximum value.
4664     static const int kExpectedAbortInstructions = 10;
4665     int abort_instructions = InstructionsGeneratedSince(&abort_start);
4666     DCHECK(abort_instructions <= kExpectedAbortInstructions);
4667     while (abort_instructions++ < kExpectedAbortInstructions) {
4668       nop();
4669     }
4670   }
4671 }
4672 
4673 
LoadContext(Register dst,int context_chain_length)4674 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4675   if (context_chain_length > 0) {
4676     // Move up the chain of contexts to the context containing the slot.
4677     ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4678     for (int i = 1; i < context_chain_length; i++) {
4679       ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4680     }
4681   } else {
4682     // Slot is in the current function context.  Move it into the
4683     // destination register in case we store into it (the write barrier
4684     // cannot be allowed to destroy the context in cp).
4685     Move(dst, cp);
4686   }
4687 }
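// For example, LoadContext(a0, 2) follows two PREVIOUS links starting from
// cp, while LoadContext(a0, 0) simply copies the current context into a0.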
4688 
4689 
LoadTransitionedArrayMapConditional(ElementsKind expected_kind,ElementsKind transitioned_kind,Register map_in_out,Register scratch,Label * no_map_match)4690 void MacroAssembler::LoadTransitionedArrayMapConditional(
4691     ElementsKind expected_kind,
4692     ElementsKind transitioned_kind,
4693     Register map_in_out,
4694     Register scratch,
4695     Label* no_map_match) {
4696   // Load the global or builtins object from the current context.
4697   ld(scratch,
4698      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4699   ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4700 
4701   // Check that the function's map is the same as the expected cached map.
4702   ld(scratch,
4703      MemOperand(scratch,
4704                 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4705   size_t offset = expected_kind * kPointerSize +
4706       FixedArrayBase::kHeaderSize;
4707   ld(at, FieldMemOperand(scratch, offset));
4708   Branch(no_map_match, ne, map_in_out, Operand(at));
4709 
4710   // Use the transitioned cached map.
4711   offset = transitioned_kind * kPointerSize +
4712       FixedArrayBase::kHeaderSize;
4713   ld(map_in_out, FieldMemOperand(scratch, offset));
4714 }
4715 
4716 
LoadGlobalFunction(int index,Register function)4717 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4718   // Load the global or builtins object from the current context.
4719   ld(function,
4720      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4721   // Load the native context from the global or builtins object.
4722   ld(function, FieldMemOperand(function,
4723                                GlobalObject::kNativeContextOffset));
4724   // Load the function from the native context.
4725   ld(function, MemOperand(function, Context::SlotOffset(index)));
4726 }
4727 
4728 
LoadGlobalFunctionInitialMap(Register function,Register map,Register scratch)4729 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4730                                                   Register map,
4731                                                   Register scratch) {
4732   // Load the initial map. The global functions all have initial maps.
4733   ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4734   if (emit_debug_code()) {
4735     Label ok, fail;
4736     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4737     Branch(&ok);
4738     bind(&fail);
4739     Abort(kGlobalFunctionsMustHaveInitialMap);
4740     bind(&ok);
4741   }
4742 }
4743 
4744 
StubPrologue()4745 void MacroAssembler::StubPrologue() {
4746     Push(ra, fp, cp);
4747     Push(Smi::FromInt(StackFrame::STUB));
4748     // Adjust FP to point to saved FP.
4749     Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4750 }
4751 
4752 
Prologue(bool code_pre_aging)4753 void MacroAssembler::Prologue(bool code_pre_aging) {
4754   PredictableCodeSizeScope predictable_code_size_scope(
4755       this, kNoCodeAgeSequenceLength);
4756   // The following three instructions must remain together and unmodified
4757   // for code aging to work properly.
4758   if (code_pre_aging) {
4759     // Pre-age the code.
4760     Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4761     nop(Assembler::CODE_AGE_MARKER_NOP);
4762     // Load the stub address to t9 and call it,
4763     // GetCodeAgeAndParity() extracts the stub address from this instruction.
4764     li(t9,
4765        Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
4766        ADDRESS_LOAD);
4767     nop();  // Prevent jalr to jal optimization.
4768     jalr(t9, a0);
4769     nop();  // Branch delay slot nop.
4770     nop();  // Pad the empty space.
4771   } else {
4772     Push(ra, fp, cp, a1);
4773     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4774     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4775     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4776     // Adjust fp to point to caller's fp.
4777     Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4778   }
4779 }
4780 
4781 
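// EnterFrame() below builds a five-slot frame:
//   [sp + 4 * kPointerSize]  saved ra
//   [sp + 3 * kPointerSize]  saved fp   <- new fp points here
//   [sp + 2 * kPointerSize]  saved cp
//   [sp + 1 * kPointerSize]  frame type (as a Smi)
//   [sp + 0 * kPointerSize]  code object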
EnterFrame(StackFrame::Type type)4782 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4783   daddiu(sp, sp, -5 * kPointerSize);
4784   li(t8, Operand(Smi::FromInt(type)));
4785   li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4786   sd(ra, MemOperand(sp, 4 * kPointerSize));
4787   sd(fp, MemOperand(sp, 3 * kPointerSize));
4788   sd(cp, MemOperand(sp, 2 * kPointerSize));
4789   sd(t8, MemOperand(sp, 1 * kPointerSize));
4790   sd(t9, MemOperand(sp, 0 * kPointerSize));
4791   // Adjust FP to point to saved FP.
4792   Daddu(fp, sp,
4793        Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4794 }
4795 
4796 
LeaveFrame(StackFrame::Type type)4797 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4798   mov(sp, fp);
4799   ld(fp, MemOperand(sp, 0 * kPointerSize));
4800   ld(ra, MemOperand(sp, 1 * kPointerSize));
4801   daddiu(sp, sp, 2 * kPointerSize);
4802 }
4803 
4804 
EnterExitFrame(bool save_doubles,int stack_space)4805 void MacroAssembler::EnterExitFrame(bool save_doubles,
4806                                     int stack_space) {
4807   // Set up the frame structure on the stack.
4808   STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4809   STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4810   STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4811 
4812   // This is how the stack will look:
4813   // fp + 2 (==kCallerSPDisplacement) - old stack's end
4814   // [fp + 1 (==kCallerPCOffset)] - saved old ra
4815   // [fp + 0 (==kCallerFPOffset)] - saved old fp
4816   // [fp - 1 (==kSPOffset)] - sp of the called function
4817   // [fp - 2 (==kCodeOffset)] - CodeObject
4818   // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4819   //   new stack (will contain saved ra)
4820 
4821   // Save registers.
4822   daddiu(sp, sp, -4 * kPointerSize);
4823   sd(ra, MemOperand(sp, 3 * kPointerSize));
4824   sd(fp, MemOperand(sp, 2 * kPointerSize));
4825   daddiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
4826 
4827   if (emit_debug_code()) {
4828     sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4829   }
4830 
4831   // Accessed from ExitFrame::code_slot.
4832   li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4833   sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4834 
4835   // Save the frame pointer and the context in top.
4836   li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4837   sd(fp, MemOperand(t8));
4838   li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4839   sd(cp, MemOperand(t8));
4840 
4841   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4842   if (save_doubles) {
4843     // The stack is already aligned to a multiple of 8 for stores with sdc1.
4844     int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
4845     int space = kNumOfSavedRegisters * kDoubleSize;
4846     Dsubu(sp, sp, Operand(space));
4847     // Remember: we only need to save every 2nd double FPU value.
4848     for (int i = 0; i < kNumOfSavedRegisters; i++) {
4849       FPURegister reg = FPURegister::from_code(2 * i);
4850       sdc1(reg, MemOperand(sp, i * kDoubleSize));
4851     }
4852   }
4853 
4854   // Reserve place for the return address, stack space and an optional slot
4855   // (used by the DirectCEntryStub to hold the return value if a struct is
4856   // returned) and align the frame preparing for calling the runtime function.
4857   DCHECK(stack_space >= 0);
4858   Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4859   if (frame_alignment > 0) {
4860     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4861     And(sp, sp, Operand(-frame_alignment));  // Align stack.
4862   }
4863 
4864   // Set the exit frame sp value to point just before the return address
4865   // location.
4866   daddiu(at, sp, kPointerSize);
4867   sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4868 }
4869 
4870 
LeaveExitFrame(bool save_doubles,Register argument_count,bool restore_context,bool do_return)4871 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4872                                     Register argument_count,
4873                                     bool restore_context,
4874                                     bool do_return) {
4875   // Optionally restore all double registers.
4876   if (save_doubles) {
4877     // Remember: we only need to restore every 2nd double FPU value.
4878     int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
4879     Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
4880         kNumOfSavedRegisters * kDoubleSize));
4881     for (int i = 0; i < kNumOfSavedRegisters; i++) {
4882       FPURegister reg = FPURegister::from_code(2 * i);
4883       ldc1(reg, MemOperand(t8, i  * kDoubleSize));
4884     }
4885   }
4886 
4887   // Clear top frame.
4888   li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4889   sd(zero_reg, MemOperand(t8));
4890 
4891   // Restore current context from top and clear it in debug mode.
4892   if (restore_context) {
4893     li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4894     ld(cp, MemOperand(t8));
4895   }
4896 #ifdef DEBUG
4897   li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4898   sd(a3, MemOperand(t8));
4899 #endif
4900 
4901   // Pop the arguments, restore registers, and return.
4902   mov(sp, fp);  // Respect ABI stack constraint.
4903   ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4904   ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4905 
4906   if (argument_count.is_valid()) {
4907     dsll(t8, argument_count, kPointerSizeLog2);
4908     daddu(sp, sp, t8);
4909   }
4910 
4911   if (do_return) {
4912     Ret(USE_DELAY_SLOT);
4913     // If returning, the instruction in the delay slot will be the daddiu below.
4914   }
4915   daddiu(sp, sp, 2 * kPointerSize);
4916 }
4917 
4918 
InitializeNewString(Register string,Register length,Heap::RootListIndex map_index,Register scratch1,Register scratch2)4919 void MacroAssembler::InitializeNewString(Register string,
4920                                          Register length,
4921                                          Heap::RootListIndex map_index,
4922                                          Register scratch1,
4923                                          Register scratch2) {
4924   // dsll(scratch1, length, kSmiTagSize);
4925   dsll32(scratch1, length, 0);
4926   LoadRoot(scratch2, map_index);
4927   sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
4928   li(scratch1, Operand(String::kEmptyHashField));
4929   sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4930   sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4931 }
4932 
4933 
ActivationFrameAlignment()4934 int MacroAssembler::ActivationFrameAlignment() {
4935 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
4936   // Running on the real platform. Use the alignment as mandated by the local
4937   // environment.
4938   // Note: This will break if we ever start generating snapshots on one Mips
4939   // platform for another Mips platform with a different alignment.
4940   return base::OS::ActivationFrameAlignment();
4941 #else  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
4942   // If we are using the simulator then we should always align to the expected
4943   // alignment. As the simulator is used to generate snapshots we do not know
4944   // if the target platform will need alignment, so this is controlled from a
4945   // flag.
4946   return FLAG_sim_stack_alignment;
4947 #endif  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
4948 }
4949 
4950 
AssertStackIsAligned()4951 void MacroAssembler::AssertStackIsAligned() {
4952   if (emit_debug_code()) {
4953     const int frame_alignment = ActivationFrameAlignment();
4954     const int frame_alignment_mask = frame_alignment - 1;
4955 
4956     if (frame_alignment > kPointerSize) {
4957       Label alignment_as_expected;
4958       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4959       andi(at, sp, frame_alignment_mask);
4960       Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4961       // Don't use Check here, as it will call Runtime_Abort re-entering here.
4962       stop("Unexpected stack alignment");
4963       bind(&alignment_as_expected);
4964     }
4965   }
4966 }
4967 
4968 
JumpIfNotPowerOfTwoOrZero(Register reg,Register scratch,Label * not_power_of_two_or_zero)4969 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4970     Register reg,
4971     Register scratch,
4972     Label* not_power_of_two_or_zero) {
4973   Dsubu(scratch, reg, Operand(1));
4974   Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4975          scratch, Operand(zero_reg));
4976   and_(at, scratch, reg);  // In the delay slot.
4977   Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4978 }
4979 
4980 
SmiTagCheckOverflow(Register reg,Register overflow)4981 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4982   DCHECK(!reg.is(overflow));
4983   mov(overflow, reg);  // Save original value.
4984   SmiTag(reg);
4985   xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
4986 }
4987 
4988 
SmiTagCheckOverflow(Register dst,Register src,Register overflow)4989 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4990                                          Register src,
4991                                          Register overflow) {
4992   if (dst.is(src)) {
4993     // Fall back to slower case.
4994     SmiTagCheckOverflow(dst, overflow);
4995   } else {
4996     DCHECK(!dst.is(src));
4997     DCHECK(!dst.is(overflow));
4998     DCHECK(!src.is(overflow));
4999     SmiTag(dst, src);
5000     xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
5001   }
5002 }
5003 
5004 
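// The Smi load helpers below exploit the 32-bit-smi layout used on MIPS64:
// when SmiValuesAre32Bits(), the payload lives in the upper 32 bits of the
// tagged word, so UntagSmiMemOperand() lets a plain lw read the untagged
// value directly instead of loading 64 bits and shifting.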
SmiLoadUntag(Register dst,MemOperand src)5005 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
5006   if (SmiValuesAre32Bits()) {
5007     lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5008   } else {
5009     lw(dst, src);
5010     SmiUntag(dst);
5011   }
5012 }
5013 
5014 
SmiLoadScale(Register dst,MemOperand src,int scale)5015 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
5016   if (SmiValuesAre32Bits()) {
5017     // TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
5018     lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5019     dsll(dst, dst, scale);
5020   } else {
5021     lw(dst, src);
5022     DCHECK(scale >= kSmiTagSize);
5023     sll(dst, dst, scale - kSmiTagSize);
5024   }
5025 }
5026 
5027 
5028 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
SmiLoadWithScale(Register d_smi,Register d_scaled,MemOperand src,int scale)5029 void MacroAssembler::SmiLoadWithScale(Register d_smi,
5030                                       Register d_scaled,
5031                                       MemOperand src,
5032                                       int scale) {
5033   if (SmiValuesAre32Bits()) {
5034     ld(d_smi, src);
5035     dsra(d_scaled, d_smi, kSmiShift - scale);
5036   } else {
5037     lw(d_smi, src);
5038     DCHECK(scale >= kSmiTagSize);
5039     sll(d_scaled, d_smi, scale - kSmiTagSize);
5040   }
5041 }
5042 
5043 
5044 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
SmiLoadUntagWithScale(Register d_int,Register d_scaled,MemOperand src,int scale)5045 void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
5046                                            Register d_scaled,
5047                                            MemOperand src,
5048                                            int scale) {
5049   if (SmiValuesAre32Bits()) {
5050     lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
5051     dsll(d_scaled, d_int, scale);
5052   } else {
5053     lw(d_int, src);
5054     // Need both the int and the scaled int, so use two instructions.
5055     SmiUntag(d_int);
5056     sll(d_scaled, d_int, scale);
5057   }
5058 }
5059 
5060 
UntagAndJumpIfSmi(Register dst,Register src,Label * smi_case)5061 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5062                                        Register src,
5063                                        Label* smi_case) {
5064   // DCHECK(!dst.is(src));
5065   JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5066   SmiUntag(dst, src);
5067 }
5068 
5069 
UntagAndJumpIfNotSmi(Register dst,Register src,Label * non_smi_case)5070 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5071                                           Register src,
5072                                           Label* non_smi_case) {
5073   // DCHECK(!dst.is(src));
5074   JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5075   SmiUntag(dst, src);
5076 }
5077 
JumpIfSmi(Register value,Label * smi_label,Register scratch,BranchDelaySlot bd)5078 void MacroAssembler::JumpIfSmi(Register value,
5079                                Label* smi_label,
5080                                Register scratch,
5081                                BranchDelaySlot bd) {
5082   DCHECK_EQ(0, kSmiTag);
5083   andi(scratch, value, kSmiTagMask);
5084   Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5085 }
5086 
JumpIfNotSmi(Register value,Label * not_smi_label,Register scratch,BranchDelaySlot bd)5087 void MacroAssembler::JumpIfNotSmi(Register value,
5088                                   Label* not_smi_label,
5089                                   Register scratch,
5090                                   BranchDelaySlot bd) {
5091   DCHECK_EQ(0, kSmiTag);
5092   andi(scratch, value, kSmiTagMask);
5093   Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5094 }
5095 
5096 
JumpIfNotBothSmi(Register reg1,Register reg2,Label * on_not_both_smi)5097 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5098                                       Register reg2,
5099                                       Label* on_not_both_smi) {
5100   STATIC_ASSERT(kSmiTag == 0);
5101   // TODO(plind): Find some better way to fix this assert issue.
5102 #if defined(__APPLE__)
5103   DCHECK_EQ(1, kSmiTagMask);
5104 #else
5105   DCHECK_EQ((uint64_t)1, kSmiTagMask);
5106 #endif
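  // The OR of the two values has its tag bit set iff at least one of them is
  // a non-Smi, so a single JumpIfNotSmi on the OR covers both registers.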
5107   or_(at, reg1, reg2);
5108   JumpIfNotSmi(at, on_not_both_smi);
5109 }
5110 
5111 
JumpIfEitherSmi(Register reg1,Register reg2,Label * on_either_smi)5112 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5113                                      Register reg2,
5114                                      Label* on_either_smi) {
5115   STATIC_ASSERT(kSmiTag == 0);
5116   // TODO(plind): Find some better way to fix this assert issue.
5117 #if defined(__APPLE__)
5118   DCHECK_EQ(1, kSmiTagMask);
5119 #else
5120   DCHECK_EQ((uint64_t)1, kSmiTagMask);
5121 #endif
5122   // The AND of the two values is a Smi unless both tag bits are 1 (non-Smi).
5123   and_(at, reg1, reg2);
5124   JumpIfSmi(at, on_either_smi);
5125 }
5126 
5127 
AssertNotSmi(Register object)5128 void MacroAssembler::AssertNotSmi(Register object) {
5129   if (emit_debug_code()) {
5130     STATIC_ASSERT(kSmiTag == 0);
5131     andi(at, object, kSmiTagMask);
5132     Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5133   }
5134 }
5135 
5136 
AssertSmi(Register object)5137 void MacroAssembler::AssertSmi(Register object) {
5138   if (emit_debug_code()) {
5139     STATIC_ASSERT(kSmiTag == 0);
5140     andi(at, object, kSmiTagMask);
5141     Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5142   }
5143 }
5144 
5145 
AssertString(Register object)5146 void MacroAssembler::AssertString(Register object) {
5147   if (emit_debug_code()) {
5148     STATIC_ASSERT(kSmiTag == 0);
5149     SmiTst(object, a4);
5150     Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg));
5151     push(object);
5152     ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
5153     lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5154     Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
5155     pop(object);
5156   }
5157 }
5158 
5159 
AssertName(Register object)5160 void MacroAssembler::AssertName(Register object) {
5161   if (emit_debug_code()) {
5162     STATIC_ASSERT(kSmiTag == 0);
5163     SmiTst(object, a4);
5164     Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg));
5165     push(object);
5166     ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
5167     lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5168     Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
5169     pop(object);
5170   }
5171 }
5172 
5173 
AssertUndefinedOrAllocationSite(Register object,Register scratch)5174 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5175                                                      Register scratch) {
5176   if (emit_debug_code()) {
5177     Label done_checking;
5178     AssertNotSmi(object);
5179     LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5180     Branch(&done_checking, eq, object, Operand(scratch));
5181     push(object);
5182     ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
5183     LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5184     Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5185     pop(object);
5186     bind(&done_checking);
5187   }
5188 }
5189 
5190 
AssertIsRoot(Register reg,Heap::RootListIndex index)5191 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5192   if (emit_debug_code()) {
5193     DCHECK(!reg.is(at));
5194     LoadRoot(at, index);
5195     Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5196   }
5197 }
5198 
5199 
JumpIfNotHeapNumber(Register object,Register heap_number_map,Register scratch,Label * on_not_heap_number)5200 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5201                                          Register heap_number_map,
5202                                          Register scratch,
5203                                          Label* on_not_heap_number) {
5204   ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5205   AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5206   Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5207 }
5208 
5209 
LookupNumberStringCache(Register object,Register result,Register scratch1,Register scratch2,Register scratch3,Label * not_found)5210 void MacroAssembler::LookupNumberStringCache(Register object,
5211                                              Register result,
5212                                              Register scratch1,
5213                                              Register scratch2,
5214                                              Register scratch3,
5215                                              Label* not_found) {
5216   // Use of registers. Register result is used as a temporary.
5217   Register number_string_cache = result;
5218   Register mask = scratch3;
5219 
5220   // Load the number string cache.
5221   LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
5222 
5223   // Make the hash mask from the length of the number string cache. It
5224   // contains two elements (number and string) for each cache entry.
5225   ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
5226   // Divide length by two (length is a smi).
5227   // dsra(mask, mask, kSmiTagSize + 1);
5228   dsra32(mask, mask, 1);
5229   Daddu(mask, mask, -1);  // Make mask.
5230 
5231   // Calculate the entry in the number string cache. The hash value in the
5232   // number string cache for smis is just the smi value, and the hash for
5233   // doubles is the xor of the upper and lower words. See
5234   // Heap::GetNumberStringCache.
5235   Label is_smi;
5236   Label load_result_from_cache;
5237   JumpIfSmi(object, &is_smi);
5238   CheckMap(object,
5239            scratch1,
5240            Heap::kHeapNumberMapRootIndex,
5241            not_found,
5242            DONT_DO_SMI_CHECK);
5243 
5244   STATIC_ASSERT(8 == kDoubleSize);
5245   Daddu(scratch1,
5246        object,
5247        Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5248   ld(scratch2, MemOperand(scratch1, kPointerSize));
5249   ld(scratch1, MemOperand(scratch1, 0));
5250   Xor(scratch1, scratch1, Operand(scratch2));
5251   And(scratch1, scratch1, Operand(mask));
5252 
5253   // Calculate address of entry in string cache: each entry consists
5254   // of two pointer sized fields.
5255   dsll(scratch1, scratch1, kPointerSizeLog2 + 1);
5256   Daddu(scratch1, number_string_cache, scratch1);
5257 
5258   Register probe = mask;
5259   ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5260   JumpIfSmi(probe, not_found);
5261   ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5262   ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5263   BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5264   Branch(not_found);
5265 
5266   bind(&is_smi);
5267   Register scratch = scratch1;
5268   // dsra(scratch, object, 1);   // Shift away the tag.
5269   dsra32(scratch, object, 0);
5270   And(scratch, mask, Operand(scratch));
5271 
5272   // Calculate address of entry in string cache: each entry consists
5273   // of two pointer sized fields.
5274   dsll(scratch, scratch, kPointerSizeLog2 + 1);
5275   Daddu(scratch, number_string_cache, scratch);
5276 
5277   // Check if the entry is the smi we are looking for.
5278   ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5279   Branch(not_found, ne, object, Operand(probe));
5280 
5281   // Get the result from the cache.
5282   bind(&load_result_from_cache);
5283   ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5284 
5285   IncrementCounter(isolate()->counters()->number_to_string_native(),
5286                    1,
5287                    scratch1,
5288                    scratch2);
5289 }
5290 
5291 
JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)5292 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5293     Register first, Register second, Register scratch1, Register scratch2,
5294     Label* failure) {
5295   // Test that both first and second are sequential one-byte strings.
5296   // Assume that they are non-smis.
5297   ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5298   ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5299   lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5300   lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5301 
5302   JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5303                                                  scratch2, failure);
5304 }
5305 
5306 
JumpIfNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)5307 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5308                                                            Register second,
5309                                                            Register scratch1,
5310                                                            Register scratch2,
5311                                                            Label* failure) {
5312   // Check that neither is a smi.
5313   STATIC_ASSERT(kSmiTag == 0);
5314   And(scratch1, first, Operand(second));
5315   JumpIfSmi(scratch1, failure);
5316   JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5317                                                scratch2, failure);
5318 }
5319 
5320 
JumpIfBothInstanceTypesAreNotSequentialOneByte(Register first,Register second,Register scratch1,Register scratch2,Label * failure)5321 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5322     Register first, Register second, Register scratch1, Register scratch2,
5323     Label* failure) {
5324   const int kFlatOneByteStringMask =
5325       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5326   const int kFlatOneByteStringTag =
5327       kStringTag | kOneByteStringTag | kSeqStringTag;
5328   DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
5329   andi(scratch1, first, kFlatOneByteStringMask);
5330   Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5331   andi(scratch2, second, kFlatOneByteStringMask);
5332   Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5333 }
5334 
5335 
JumpIfInstanceTypeIsNotSequentialOneByte(Register type,Register scratch,Label * failure)5336 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5337                                                               Register scratch,
5338                                                               Label* failure) {
5339   const int kFlatOneByteStringMask =
5340       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5341   const int kFlatOneByteStringTag =
5342       kStringTag | kOneByteStringTag | kSeqStringTag;
5343   And(scratch, type, Operand(kFlatOneByteStringMask));
5344   Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
5345 }
5346 
5347 
5348 static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
5349 
CalculateStackPassedWords(int num_reg_arguments,int num_double_arguments)5350 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5351                                               int num_double_arguments) {
5352   int stack_passed_words = 0;
5353   num_reg_arguments += 2 * num_double_arguments;
5354 
5355   // O32: Up to four simple arguments are passed in registers a0..a3.
5356   // N64: Up to eight simple arguments are passed in registers a0..a7.
5357   if (num_reg_arguments > kRegisterPassedArguments) {
5358     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5359   }
5360   stack_passed_words += kCArgSlotCount;
5361   return stack_passed_words;
5362 }
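// Worked example for the function above (assuming the N64 ABI, where
// kCArgSlotCount is 0): a call with 6 integer and 2 double arguments counts
// as 6 + 2 * 2 = 10 register arguments, of which 10 - 8 = 2 spill to the
// stack, so this returns 2.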
5363 
5364 
EmitSeqStringSetCharCheck(Register string,Register index,Register value,Register scratch,uint32_t encoding_mask)5365 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5366                                                Register index,
5367                                                Register value,
5368                                                Register scratch,
5369                                                uint32_t encoding_mask) {
5370   Label is_object;
5371   SmiTst(string, at);
5372   Check(ne, kNonObject, at, Operand(zero_reg));
5373 
5374   ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
5375   lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5376 
5377   andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5378   li(scratch, Operand(encoding_mask));
5379   Check(eq, kUnexpectedStringType, at, Operand(scratch));
5380 
5381   // TODO(plind): requires Smi size check code for mips32.
5382 
5383   ld(at, FieldMemOperand(string, String::kLengthOffset));
5384   Check(lt, kIndexIsTooLarge, index, Operand(at));
5385 
5386   DCHECK(Smi::FromInt(0) == 0);
5387   Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5388 }
5389 
5390 
PrepareCallCFunction(int num_reg_arguments,int num_double_arguments,Register scratch)5391 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5392                                           int num_double_arguments,
5393                                           Register scratch) {
5394   int frame_alignment = ActivationFrameAlignment();
5395 
5396   // N64: Up to eight simple arguments in registers a0..a7; no argument slots.
5397   // O32: Up to four simple arguments are passed in registers a0..a3.
5398   // Those four arguments must have reserved argument slots on the stack for
5399   // mips, even though those argument slots are not normally used.
5400   // Both ABIs: Remaining arguments are pushed on the stack, above (higher
5401   // address than) the (O32) argument slots. (arg slot calculation handled by
5402   // CalculateStackPassedWords()).
5403   int stack_passed_arguments = CalculateStackPassedWords(
5404       num_reg_arguments, num_double_arguments);
5405   if (frame_alignment > kPointerSize) {
5406     // Make stack end at alignment and make room for the stack-passed
5407     // arguments and the original value of sp.
5408     mov(scratch, sp);
5409     Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5410     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5411     And(sp, sp, Operand(-frame_alignment));
5412     sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5413   } else {
5414     Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5415   }
5416 }
5417 
5418 
PrepareCallCFunction(int num_reg_arguments,Register scratch)5419 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5420                                           Register scratch) {
5421   PrepareCallCFunction(num_reg_arguments, 0, scratch);
5422 }
5423 
5424 
CallCFunction(ExternalReference function,int num_reg_arguments,int num_double_arguments)5425 void MacroAssembler::CallCFunction(ExternalReference function,
5426                                    int num_reg_arguments,
5427                                    int num_double_arguments) {
5428   li(t8, Operand(function));
5429   CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5430 }
5431 
5432 
CallCFunction(Register function,int num_reg_arguments,int num_double_arguments)5433 void MacroAssembler::CallCFunction(Register function,
5434                                    int num_reg_arguments,
5435                                    int num_double_arguments) {
5436   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5437 }
5438 
5439 
CallCFunction(ExternalReference function,int num_arguments)5440 void MacroAssembler::CallCFunction(ExternalReference function,
5441                                    int num_arguments) {
5442   CallCFunction(function, num_arguments, 0);
5443 }
5444 
5445 
CallCFunction(Register function,int num_arguments)5446 void MacroAssembler::CallCFunction(Register function,
5447                                    int num_arguments) {
5448   CallCFunction(function, num_arguments, 0);
5449 }
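// Typical calling sequence (a sketch; the callee and argument counts are
// illustrative only, not taken from this file):
//   PrepareCallCFunction(0, 2, scratch);
//   ... move the two double arguments into the FP argument registers ...
//   CallCFunction(
//       ExternalReference::power_double_double_function(isolate()), 0, 2);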
5450 
5451 
CallCFunctionHelper(Register function,int num_reg_arguments,int num_double_arguments)5452 void MacroAssembler::CallCFunctionHelper(Register function,
5453                                          int num_reg_arguments,
5454                                          int num_double_arguments) {
5455   DCHECK(has_frame());
5456   // Make sure that the stack is aligned before calling a C function unless
5457   // running in the simulator. The simulator has its own alignment check which
5458   // provides more information.
5459   // The argument slots are presumed to have been set up by
5460   // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5461 
5462 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5463   if (emit_debug_code()) {
5464     int frame_alignment = base::OS::ActivationFrameAlignment();
5465     int frame_alignment_mask = frame_alignment - 1;
5466     if (frame_alignment > kPointerSize) {
5467       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5468       Label alignment_as_expected;
5469       And(at, sp, Operand(frame_alignment_mask));
5470       Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5471       // Don't use Check here, as it will call Runtime_Abort possibly
5472       // re-entering here.
5473       stop("Unexpected alignment in CallCFunction");
5474       bind(&alignment_as_expected);
5475     }
5476   }
5477 #endif  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5478 
5479   // Just call directly. The function called cannot cause a GC, or
5480   // allow preemption, so the return address in the link register
5481   // stays correct.
5482 
5483   if (!function.is(t9)) {
5484     mov(t9, function);
5485     function = t9;
5486   }
5487 
5488   Call(function);
5489 
5490   int stack_passed_arguments = CalculateStackPassedWords(
5491       num_reg_arguments, num_double_arguments);
5492 
5493   if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5494     ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5495   } else {
5496     Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5497   }
5498 }
5499 
5500 
5501 #undef BRANCH_ARGS_CHECK
5502 
5503 
PatchRelocatedValue(Register li_location,Register scratch,Register new_value)5504 void MacroAssembler::PatchRelocatedValue(Register li_location,
5505                                          Register scratch,
5506                                          Register new_value) {
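  // This rewrites the immediates of a relocated 64-bit constant load: bits
  // 47..32 of new_value go into the lui at offset 0, bits 31..16 into the ori
  // at offset kInstrSize, and bits 15..0 into the ori at offset
  // 3 * kInstrSize. The instruction at offset 2 * kInstrSize carries no
  // patched immediate and is left untouched.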
5507   lwu(scratch, MemOperand(li_location));
5508   // At this point scratch is a lui(at, ...) instruction.
5509   if (emit_debug_code()) {
5510     And(scratch, scratch, kOpcodeMask);
5511     Check(eq, kTheInstructionToPatchShouldBeALui,
5512         scratch, Operand(LUI));
5513     lwu(scratch, MemOperand(li_location));
5514   }
5515   dsrl32(t9, new_value, 0);
5516   Ins(scratch, t9, 0, kImm16Bits);
5517   sw(scratch, MemOperand(li_location));
5518 
5519   lwu(scratch, MemOperand(li_location, kInstrSize));
5520   // scratch is now ori(at, ...).
5521   if (emit_debug_code()) {
5522     And(scratch, scratch, kOpcodeMask);
5523     Check(eq, kTheInstructionToPatchShouldBeAnOri,
5524         scratch, Operand(ORI));
5525     lwu(scratch, MemOperand(li_location, kInstrSize));
5526   }
5527   dsrl(t9, new_value, kImm16Bits);
5528   Ins(scratch, t9, 0, kImm16Bits);
5529   sw(scratch, MemOperand(li_location, kInstrSize));
5530 
5531   lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5532   // scratch is now ori(at, ...).
5533   if (emit_debug_code()) {
5534     And(scratch, scratch, kOpcodeMask);
5535     Check(eq, kTheInstructionToPatchShouldBeAnOri,
5536         scratch, Operand(ORI));
5537     lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5538   }
5539 
5540   Ins(scratch, new_value, 0, kImm16Bits);
5541   sw(scratch, MemOperand(li_location, kInstrSize * 3));
5542 
5543   // Update the I-cache so the new lui and ori can be executed.
5544   FlushICache(li_location, 4);
5545 }
5546 
GetRelocatedValue(Register li_location,Register value,Register scratch)5547 void MacroAssembler::GetRelocatedValue(Register li_location,
5548                                        Register value,
5549                                        Register scratch) {
5550   lwu(value, MemOperand(li_location));
5551   if (emit_debug_code()) {
5552     And(value, value, kOpcodeMask);
5553     Check(eq, kTheInstructionShouldBeALui,
5554         value, Operand(LUI));
5555     lwu(value, MemOperand(li_location));
5556   }
5557 
5558   // value now holds a lui instruction. Extract the immediate.
5559   andi(value, value, kImm16Mask);
5560   dsll32(value, value, kImm16Bits);
5561 
5562   lwu(scratch, MemOperand(li_location, kInstrSize));
5563   if (emit_debug_code()) {
5564     And(scratch, scratch, kOpcodeMask);
5565     Check(eq, kTheInstructionShouldBeAnOri,
5566         scratch, Operand(ORI));
5567     lwu(scratch, MemOperand(li_location, kInstrSize));
5568   }
5569   // "scratch" now holds an ori instruction. Extract the immediate.
5570   andi(scratch, scratch, kImm16Mask);
5571   dsll32(scratch, scratch, 0);
5572 
5573   or_(value, value, scratch);
5574 
5575   lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5576   if (emit_debug_code()) {
5577     And(scratch, scratch, kOpcodeMask);
5578     Check(eq, kTheInstructionShouldBeAnOri,
5579         scratch, Operand(ORI));
5580     lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5581   }
5582   // "scratch" now holds an ori instruction. Extract the immediate.
5583   andi(scratch, scratch, kImm16Mask);
5584   dsll(scratch, scratch, kImm16Bits);
5585 
5586   or_(value, value, scratch);
5587   // Sign extend extracted address.
5588   dsra(value, value, kImm16Bits);
5589 }
5590 
5591 
CheckPageFlag(Register object,Register scratch,int mask,Condition cc,Label * condition_met)5592 void MacroAssembler::CheckPageFlag(
5593     Register object,
5594     Register scratch,
5595     int mask,
5596     Condition cc,
5597     Label* condition_met) {
5598   And(scratch, object, Operand(~Page::kPageAlignmentMask));
5599   ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5600   And(scratch, scratch, Operand(mask));
5601   Branch(condition_met, cc, scratch, Operand(zero_reg));
5602 }
5603 
5604 
CheckMapDeprecated(Handle<Map> map,Register scratch,Label * if_deprecated)5605 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5606                                         Register scratch,
5607                                         Label* if_deprecated) {
5608   if (map->CanBeDeprecated()) {
5609     li(scratch, Operand(map));
5610     ld(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5611     And(scratch, scratch, Operand(Map::Deprecated::kMask));
5612     Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5613   }
5614 }
5615 
5616 
JumpIfBlack(Register object,Register scratch0,Register scratch1,Label * on_black)5617 void MacroAssembler::JumpIfBlack(Register object,
5618                                  Register scratch0,
5619                                  Register scratch1,
5620                                  Label* on_black) {
5621   HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
5622   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5623 }
5624 
5625 
HasColor(Register object,Register bitmap_scratch,Register mask_scratch,Label * has_color,int first_bit,int second_bit)5626 void MacroAssembler::HasColor(Register object,
5627                               Register bitmap_scratch,
5628                               Register mask_scratch,
5629                               Label* has_color,
5630                               int first_bit,
5631                               int second_bit) {
5632   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5633   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5634 
5635   GetMarkBits(object, bitmap_scratch, mask_scratch);
5636 
5637   Label other_color;
5638   // Note that we are using a 4-byte aligned 8-byte load.
5639   Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5640   And(t8, t9, Operand(mask_scratch));
5641   Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5642   // Shift left 1 by adding.
5643   Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
5644   And(t8, t9, Operand(mask_scratch));
5645   Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5646 
5647   bind(&other_color);
5648 }
5649 
5650 
5651 // Detect some, but not all, common pointer-free objects.  This is used by the
5652 // incremental write barrier which doesn't care about oddballs (they are always
5653 // marked black immediately so this code is not hit).
JumpIfDataObject(Register value,Register scratch,Label * not_data_object)5654 void MacroAssembler::JumpIfDataObject(Register value,
5655                                       Register scratch,
5656                                       Label* not_data_object) {
5657   DCHECK(!AreAliased(value, scratch, t8, no_reg));
5658   Label is_data_object;
5659   ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5660   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5661   Branch(&is_data_object, eq, t8, Operand(scratch));
5662   DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5663   DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5664   // If it's a string and it's not an indirect (cons or sliced) string then
5665   // it's an object containing no GC pointers.
5666   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5667   And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5668   Branch(not_data_object, ne, t8, Operand(zero_reg));
5669   bind(&is_data_object);
5670 }
5671 
5672 
GetMarkBits(Register addr_reg,Register bitmap_reg,Register mask_reg)5673 void MacroAssembler::GetMarkBits(Register addr_reg,
5674                                  Register bitmap_reg,
5675                                  Register mask_reg) {
5676   DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5677   // addr_reg is divided into fields:
5678   // |63        page base        20|19    high      8|7   shift   3|2  0|
5679   // 'high' gives the index of the cell holding color bits for the object.
5680   // 'shift' gives the offset in the cell for this object's color.
5681   And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5682   Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5683   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5684   Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5685   dsll(t8, t8, Bitmap::kBytesPerCellLog2);
5686   Daddu(bitmap_reg, bitmap_reg, t8);
5687   li(t8, Operand(1));
5688   dsllv(mask_reg, t8, mask_reg);
5689 }
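// On return from GetMarkBits(), bitmap_reg holds the page base plus the byte
// offset of the object's bitmap cell (callers add MemoryChunk::kHeaderSize
// when loading it), and mask_reg has a single bit set selecting the object's
// first mark bit within that cell.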
5690 
5691 
EnsureNotWhite(Register value,Register bitmap_scratch,Register mask_scratch,Register load_scratch,Label * value_is_white_and_not_data)5692 void MacroAssembler::EnsureNotWhite(
5693     Register value,
5694     Register bitmap_scratch,
5695     Register mask_scratch,
5696     Register load_scratch,
5697     Label* value_is_white_and_not_data) {
5698   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5699   GetMarkBits(value, bitmap_scratch, mask_scratch);
5700 
5701   // If the value is black or grey we don't need to do anything.
5702   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5703   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5704   DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5705   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5706 
5707   Label done;
5708 
5709   // Since both black and grey have a 1 in the first position and white does
5710   // not have a 1 there we only need to check one bit.
5711   // Note that we are using a 4-byte aligned 8-byte load.
5712   Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5713   And(t8, mask_scratch, load_scratch);
5714   Branch(&done, ne, t8, Operand(zero_reg));
5715 
5716   if (emit_debug_code()) {
5717     // Check for impossible bit pattern.
5718     Label ok;
5719     // sll may overflow, making the check conservative.
5720     dsll(t8, mask_scratch, 1);
5721     And(t8, load_scratch, t8);
5722     Branch(&ok, eq, t8, Operand(zero_reg));
5723     stop("Impossible marking bit pattern");
5724     bind(&ok);
5725   }
5726 
5727   // Value is white.  We check whether it is data that doesn't need scanning.
5728   // Currently only checks for HeapNumber and non-cons strings.
5729   Register map = load_scratch;  // Holds map while checking type.
5730   Register length = load_scratch;  // Holds length of object after testing type.
5731   Label is_data_object;
5732 
5733   // Check for heap-number
5734   ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
5735   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5736   {
5737     Label skip;
5738     Branch(&skip, ne, t8, Operand(map));
5739     li(length, HeapNumber::kSize);
5740     Branch(&is_data_object);
5741     bind(&skip);
5742   }
5743 
5744   // Check for strings.
5745   DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5746   DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5747   // If it's a string and it's not an indirect (cons or sliced) string then
5748   // it's an object containing no GC pointers.
5749   Register instance_type = load_scratch;
5750   lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5751   And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5752   Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5753   // It's a non-indirect (non-cons and non-slice) string.
5754   // If it's external, the length is just ExternalString::kSize.
5755   // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5756   // External strings are the only ones with the kExternalStringTag bit
5757   // set.
5758   DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5759   DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5760   And(t8, instance_type, Operand(kExternalStringTag));
5761   {
5762     Label skip;
5763     Branch(&skip, eq, t8, Operand(zero_reg));
5764     li(length, ExternalString::kSize);
5765     Branch(&is_data_object);
5766     bind(&skip);
5767   }
5768 
5769   // Sequential string, either Latin1 or UC16.
5770   // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
5771   // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5772   // getting the length multiplied by 2.
5773   DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5774   DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5775   lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
5776   And(t8, instance_type, Operand(kStringEncodingMask));
5777   {
5778     Label skip;
5779     Branch(&skip, ne, t8, Operand(zero_reg));
5780     // Adjust length for UC16.
5781     dsll(t9, t9, 1);
5782     bind(&skip);
5783   }
5784   Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5785   DCHECK(!length.is(t8));
5786   And(length, length, Operand(~kObjectAlignmentMask));
5787 
5788   bind(&is_data_object);
5789   // Value is a data object, and it is white.  Mark it black.  Since we know
5790   // that the object is white we can make it black by flipping one bit.
5791   Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5792   Or(t8, t8, Operand(mask_scratch));
5793   Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5794 
5795   And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5796   Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5797   Daddu(t8, t8, Operand(length));
5798   Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5799 
5800   bind(&done);
5801 }
5802 
5803 
5804 void MacroAssembler::LoadInstanceDescriptors(Register map,
5805                                              Register descriptors) {
5806   ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5807 }
5808 
5809 
5810 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5811   ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
5812   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5813 }
5814 
5815 
5816 void MacroAssembler::EnumLength(Register dst, Register map) {
5817   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5818   ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
5819   And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5820   SmiTag(dst);
5821 }
5822 
5823 
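// Verifies that a for-in enumeration can use the enum cache: starting from
// the receiver (expected in a0), every object on the prototype chain must
// have a valid enum cache (and an empty one for the prototypes) and no
// elements other than the empty fixed array or the empty slow element
// dictionary; otherwise the code jumps to call_runtime.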
5824 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5825   Register empty_fixed_array_value = a6;
5826   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5827   Label next, start;
5828   mov(a2, a0);
5829 
5830   // Check if the enum length field is properly initialized, indicating that
5831   // there is an enum cache.
5832   ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5833 
5834   EnumLength(a3, a1);
5835   Branch(
5836       call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5837 
5838   jmp(&start);
5839 
5840   bind(&next);
5841   ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5842 
5843   // For all objects but the receiver, check that the cache is empty.
5844   EnumLength(a3, a1);
5845   Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5846 
5847   bind(&start);
5848 
5849   // Check that there are no elements. Register a2 contains the current JS
5850   // object we've reached through the prototype chain.
5851   Label no_elements;
5852   ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5853   Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5854 
5855   // Second chance, the object may be using the empty slow element dictionary.
5856   LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5857   Branch(call_runtime, ne, a2, Operand(at));
5858 
5859   bind(&no_elements);
5860   ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5861   Branch(&next, ne, a2, Operand(null_value));
5862 }
5863 
5864 
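// Clamps an integer value to the range 0..255.  The second branch places the
// zeroing move in its delay slot, so a negative input reaches the done label
// with 0 in output_reg, while an in-range input overwrites it via the
// fall-through move.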
5865 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5866   DCHECK(!output_reg.is(input_reg));
5867   Label done;
5868   li(output_reg, Operand(255));
5869   // Normal branch: nop in delay slot.
5870   Branch(&done, gt, input_reg, Operand(output_reg));
5871   // Use delay slot in this branch.
5872   Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5873   mov(output_reg, zero_reg);  // In delay slot.
5874   mov(output_reg, input_reg);  // Value is in range 0..255.
5875   bind(&done);
5876 }
5877 
5878 
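// Clamps a double to the byte range: NaN and values <= 0.0 produce 0, values
// of 255.0 or more produce 255, and everything in between is converted with
// cvt_w_d, i.e. using the FPU's current rounding mode.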
5879 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5880                                         DoubleRegister input_reg,
5881                                         DoubleRegister temp_double_reg) {
5882   Label above_zero;
5883   Label done;
5884   Label in_bounds;
5885 
5886   Move(temp_double_reg, 0.0);
5887   BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5888 
5889   // Double value is NaN or <= 0.0, return 0.
5890   mov(result_reg, zero_reg);
5891   Branch(&done);
5892 
5893   // Double value is >= 255, return 255.
5894   bind(&above_zero);
5895   Move(temp_double_reg, 255.0);
5896   BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5897   li(result_reg, Operand(255));
5898   Branch(&done);
5899 
5900   // In 0-255 range, round and truncate.
5901   bind(&in_bounds);
5902   cvt_w_d(temp_double_reg, input_reg);
5903   mfc1(result_reg, temp_double_reg);
5904   bind(&done);
5905 }
5906 
5907 
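// An AllocationMemento, if present, sits directly behind the JSArray it
// describes.  The code below computes the address just past a potential
// memento; if that address lies outside the currently active part of new
// space there can be no memento and the code jumps to no_memento_found.
// Otherwise the candidate's map is compared against the allocation memento
// map.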
5908 void MacroAssembler::TestJSArrayForAllocationMemento(
5909     Register receiver_reg,
5910     Register scratch_reg,
5911     Label* no_memento_found,
5912     Condition cond,
5913     Label* allocation_memento_present) {
5914   ExternalReference new_space_start =
5915       ExternalReference::new_space_start(isolate());
5916   ExternalReference new_space_allocation_top =
5917       ExternalReference::new_space_allocation_top_address(isolate());
5918   Daddu(scratch_reg, receiver_reg,
5919        Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5920   Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5921   li(at, Operand(new_space_allocation_top));
5922   ld(at, MemOperand(at));
5923   Branch(no_memento_found, gt, scratch_reg, Operand(at));
5924   ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5925   if (allocation_memento_present) {
5926     Branch(allocation_memento_present, cond, scratch_reg,
5927            Operand(isolate()->factory()->allocation_memento_map()));
5928   }
5929 }
5930 
5931 
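// Returns the first allocatable register that is not among the (valid)
// registers passed in; invalid (no_reg) arguments are ignored.  Useful when a
// scratch register is needed and only a handful of registers must be
// preserved.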
5932 Register GetRegisterThatIsNotOneOf(Register reg1,
5933                                    Register reg2,
5934                                    Register reg3,
5935                                    Register reg4,
5936                                    Register reg5,
5937                                    Register reg6) {
5938   RegList regs = 0;
5939   if (reg1.is_valid()) regs |= reg1.bit();
5940   if (reg2.is_valid()) regs |= reg2.bit();
5941   if (reg3.is_valid()) regs |= reg3.bit();
5942   if (reg4.is_valid()) regs |= reg4.bit();
5943   if (reg5.is_valid()) regs |= reg5.bit();
5944   if (reg6.is_valid()) regs |= reg6.bit();
5945 
5946   for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5947     Register candidate = Register::FromAllocationIndex(i);
5948     if (regs & candidate.bit()) continue;
5949     return candidate;
5950   }
5951   UNREACHABLE();
5952   return no_reg;
5953 }
5954 
5955 
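// Walks the prototype chain of 'object' and jumps to 'found' as soon as a
// map with DICTIONARY_ELEMENTS is encountered; the walk terminates when the
// prototype slot holds null.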
5956 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5957     Register object,
5958     Register scratch0,
5959     Register scratch1,
5960     Label* found) {
5961   DCHECK(!scratch1.is(scratch0));
5962   Factory* factory = isolate()->factory();
5963   Register current = scratch0;
5964   Label loop_again;
5965 
5966   // 'current' (scratch0) walks the chain, starting at the object itself.
5967   Move(current, object);
5968 
5969   // Loop based on the map going up the prototype chain.
5970   bind(&loop_again);
5971   ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
5972   lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5973   DecodeField<Map::ElementsKindBits>(scratch1);
5974   Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5975   ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
5976   Branch(&loop_again, ne, current, Operand(factory->null_value()));
5977 }
5978 
5979 
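// Returns true if any two of the valid registers passed in are the same.
// The number of valid arguments is compared with the number of distinct bits
// in the combined register list; the two counts differ exactly when a
// register occurs more than once.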
5980 bool AreAliased(Register reg1,
5981                 Register reg2,
5982                 Register reg3,
5983                 Register reg4,
5984                 Register reg5,
5985                 Register reg6,
5986                 Register reg7,
5987                 Register reg8) {
5988   int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
5989       reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5990       reg7.is_valid() + reg8.is_valid();
5991 
5992   RegList regs = 0;
5993   if (reg1.is_valid()) regs |= reg1.bit();
5994   if (reg2.is_valid()) regs |= reg2.bit();
5995   if (reg3.is_valid()) regs |= reg3.bit();
5996   if (reg4.is_valid()) regs |= reg4.bit();
5997   if (reg5.is_valid()) regs |= reg5.bit();
5998   if (reg6.is_valid()) regs |= reg6.bit();
5999   if (reg7.is_valid()) regs |= reg7.bit();
6000   if (reg8.is_valid()) regs |= reg8.bit();
6001   int n_of_non_aliasing_regs = NumRegs(regs);
6002 
6003   return n_of_valid_regs != n_of_non_aliasing_regs;
6004 }
6005 
6006 
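// Illustrative use of CodePatcher (a sketch, not taken from a particular
// call site in this file):
//   CodePatcher patcher(branch_address, 1, CodePatcher::FLUSH);
//   patcher.ChangeBranchCondition(ne);  // Rewrite a beq as bne in place.
// The destructor flushes the instruction cache (unless DONT_FLUSH was
// requested) and checks that exactly 'instructions' words were emitted.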
6007 CodePatcher::CodePatcher(byte* address,
6008                          int instructions,
6009                          FlushICache flush_cache)
6010     : address_(address),
6011       size_(instructions * Assembler::kInstrSize),
6012       masm_(NULL, address, size_ + Assembler::kGap),
6013       flush_cache_(flush_cache) {
6014   // Create a new macro assembler pointing to the address of the code to patch.
6015   // The size is adjusted with kGap in order for the assembler to generate size
6016   // bytes of instructions without failing with buffer size constraints.
6017   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6018 }
6019 
6020 
6021 CodePatcher::~CodePatcher() {
6022   // Indicate that code has changed.
6023   if (flush_cache_ == FLUSH) {
6024     CpuFeatures::FlushICache(address_, size_);
6025   }
6026   // Check that the code was patched as expected.
6027   DCHECK(masm_.pc_ == address_ + size_);
6028   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6029 }
6030 
6031 
6032 void CodePatcher::Emit(Instr instr) {
6033   masm()->emit(instr);
6034 }
6035 
6036 
6037 void CodePatcher::Emit(Address addr) {
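  // Left unimplemented on MIPS64: an Address is 64 bits wide and does not fit
  // into a single 32-bit instruction word, so it cannot be emitted here.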
6038   // masm()->emit(reinterpret_cast<Instr>(addr));
6039 }
6040 
6041 
6042 void CodePatcher::ChangeBranchCondition(Condition cond) {
6043   Instr instr = Assembler::instr_at(masm_.pc_);
6044   DCHECK(Assembler::IsBranch(instr));
6045   uint32_t opcode = Assembler::GetOpcodeField(instr);
6046   // Currently only the 'eq' and 'ne' cond values are supported and the simple
6047   // branch instructions (with opcode being the branch type).
6048   // There are some special cases (see Assembler::IsBranch()) so extending this
6049   // would be tricky.
6050   DCHECK(opcode == BEQ ||
6051          opcode == BNE ||
6052          opcode == BLEZ ||
6053          opcode == BGTZ ||
6054          opcode == BEQL ||
6055          opcode == BNEL ||
6056          opcode == BLEZL ||
6057          opcode == BGTZL);
6058   opcode = (cond == eq) ? BEQ : BNE;
6059   instr = (instr & ~kOpcodeMask) | opcode;
6060   masm_.emit(instr);
6061 }
6062 
6063 
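// Computes result = dividend / divisor, truncated toward zero, for a
// compile-time-constant divisor without using a divide instruction.  The
// multiplier/shift pair comes from base::SignedDivisionByConstant (division
// by invariant integers using multiplication).  Rough sketch of the emitted
// sequence, assuming divisor == 3 (so multiplier == 0x55555556, shift == 0;
// these constants are illustrative, not taken from this file):
//   result = (dividend * 0x55555556) >> 32;           // Mulh: signed high word
//   result += static_cast<uint32_t>(dividend) >> 31;  // add sign bit to round toward zero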
6064 void MacroAssembler::TruncatingDiv(Register result,
6065                                    Register dividend,
6066                                    int32_t divisor) {
6067   DCHECK(!dividend.is(result));
6068   DCHECK(!dividend.is(at));
6069   DCHECK(!result.is(at));
6070   base::MagicNumbersForDivision<uint32_t> mag =
6071       base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
6072   li(at, Operand(mag.multiplier));
6073   Mulh(result, dividend, Operand(at));
6074   bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6075   if (divisor > 0 && neg) {
6076     Addu(result, result, Operand(dividend));
6077   }
6078   if (divisor < 0 && !neg && mag.multiplier > 0) {
6079     Subu(result, result, Operand(dividend));
6080   }
6081   if (mag.shift > 0) sra(result, result, mag.shift);
6082   srl(at, dividend, 31);
6083   Addu(result, result, Operand(at));
6084 }
6085 
6086 
6087 } }  // namespace v8::internal
6088 
6089 #endif  // V8_TARGET_ARCH_MIPS64
6090