1
2 // Copyright 2012 the V8 project authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5
6 #include <limits.h> // For LONG_MIN, LONG_MAX.
7
8 #if V8_TARGET_ARCH_MIPS
9
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/debug/debug.h"
15 #include "src/mips/macro-assembler-mips.h"
16 #include "src/register-configuration.h"
17 #include "src/runtime/runtime.h"
18
19 namespace v8 {
20 namespace internal {
21
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
23 CodeObjectRequired create_code_object)
24 : Assembler(arg_isolate, buffer, size),
25 generating_stub_(false),
26 has_frame_(false),
27 has_double_zero_reg_set_(false) {
28 if (create_code_object == CodeObjectRequired::kYes) {
29 code_object_ =
30 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
31 }
32 }
33
34
35 void MacroAssembler::Load(Register dst,
36 const MemOperand& src,
37 Representation r) {
38 DCHECK(!r.IsDouble());
39 if (r.IsInteger8()) {
40 lb(dst, src);
41 } else if (r.IsUInteger8()) {
42 lbu(dst, src);
43 } else if (r.IsInteger16()) {
44 lh(dst, src);
45 } else if (r.IsUInteger16()) {
46 lhu(dst, src);
47 } else {
48 lw(dst, src);
49 }
50 }
51
52
53 void MacroAssembler::Store(Register src,
54 const MemOperand& dst,
55 Representation r) {
56 DCHECK(!r.IsDouble());
57 if (r.IsInteger8() || r.IsUInteger8()) {
58 sb(src, dst);
59 } else if (r.IsInteger16() || r.IsUInteger16()) {
60 sh(src, dst);
61 } else {
62 if (r.IsHeapObject()) {
63 AssertNotSmi(src);
64 } else if (r.IsSmi()) {
65 AssertSmi(src);
66 }
67 sw(src, dst);
68 }
69 }
70
71
72 void MacroAssembler::LoadRoot(Register destination,
73 Heap::RootListIndex index) {
74 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
75 }
76
77
78 void MacroAssembler::LoadRoot(Register destination,
79 Heap::RootListIndex index,
80 Condition cond,
81 Register src1, const Operand& src2) {
82 Branch(2, NegateCondition(cond), src1, src2);
83 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
84 }
85
86
87 void MacroAssembler::StoreRoot(Register source,
88 Heap::RootListIndex index) {
89 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
90 sw(source, MemOperand(s6, index << kPointerSizeLog2));
91 }
92
93
94 void MacroAssembler::StoreRoot(Register source,
95 Heap::RootListIndex index,
96 Condition cond,
97 Register src1, const Operand& src2) {
98 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
99 Branch(2, NegateCondition(cond), src1, src2);
100 sw(source, MemOperand(s6, index << kPointerSizeLog2));
101 }
102
103
104 // Push and pop all registers that can hold pointers.
105 void MacroAssembler::PushSafepointRegisters() {
106 // Safepoints expect a block of kNumSafepointRegisters values on the
107 // stack, so adjust the stack for unsaved registers.
108 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
109 DCHECK(num_unsaved >= 0);
110 if (num_unsaved > 0) {
111 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
112 }
113 MultiPush(kSafepointSavedRegisters);
114 }
115
116
117 void MacroAssembler::PopSafepointRegisters() {
118 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
119 MultiPop(kSafepointSavedRegisters);
120 if (num_unsaved > 0) {
121 Addu(sp, sp, Operand(num_unsaved * kPointerSize));
122 }
123 }
124
125
126 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
127 sw(src, SafepointRegisterSlot(dst));
128 }
129
130
131 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
132 lw(dst, SafepointRegisterSlot(src));
133 }
134
135
136 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
137 // The registers are pushed starting with the highest encoding,
138 // which means that lowest encodings are closest to the stack pointer.
139 return kSafepointRegisterStackIndexMap[reg_code];
140 }
141
142
143 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
144 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
145 }
146
147
148 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
149 UNIMPLEMENTED_MIPS();
150 // General purpose registers are pushed last on the stack.
151 int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
152 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
153 return MemOperand(sp, doubles_size + register_offset);
154 }
155
156
157 void MacroAssembler::InNewSpace(Register object,
158 Register scratch,
159 Condition cc,
160 Label* branch) {
161 DCHECK(cc == eq || cc == ne);
162 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
163 Branch(branch, cc, scratch,
164 Operand(ExternalReference::new_space_start(isolate())));
165 }
166
167
168 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
169 // The register 'object' contains a heap object pointer. The heap object
170 // tag is shifted away.
171 void MacroAssembler::RecordWriteField(
172 Register object,
173 int offset,
174 Register value,
175 Register dst,
176 RAStatus ra_status,
177 SaveFPRegsMode save_fp,
178 RememberedSetAction remembered_set_action,
179 SmiCheck smi_check,
180 PointersToHereCheck pointers_to_here_check_for_value) {
181 DCHECK(!AreAliased(value, dst, t8, object));
182 // First, check if a write barrier is even needed. The tests below
183 // catch stores of Smis.
184 Label done;
185
186 // Skip barrier if writing a smi.
187 if (smi_check == INLINE_SMI_CHECK) {
188 JumpIfSmi(value, &done);
189 }
190
191 // Although the object register is tagged, the offset is relative to the start
192 // of the object, so the offset must be a multiple of kPointerSize.
193 DCHECK(IsAligned(offset, kPointerSize));
194
195 Addu(dst, object, Operand(offset - kHeapObjectTag));
196 if (emit_debug_code()) {
197 Label ok;
198 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
199 Branch(&ok, eq, t8, Operand(zero_reg));
200 stop("Unaligned cell in write barrier");
201 bind(&ok);
202 }
203
204 RecordWrite(object,
205 dst,
206 value,
207 ra_status,
208 save_fp,
209 remembered_set_action,
210 OMIT_SMI_CHECK,
211 pointers_to_here_check_for_value);
212
213 bind(&done);
214
215 // Clobber clobbered input registers when running with the debug-code flag
216 // turned on to provoke errors.
217 if (emit_debug_code()) {
218 li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
219 li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
220 }
221 }
222
223
224 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
225 void MacroAssembler::RecordWriteForMap(Register object,
226 Register map,
227 Register dst,
228 RAStatus ra_status,
229 SaveFPRegsMode fp_mode) {
230 if (emit_debug_code()) {
231 DCHECK(!dst.is(at));
232 lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
233 Check(eq,
234 kWrongAddressOrValuePassedToRecordWrite,
235 dst,
236 Operand(isolate()->factory()->meta_map()));
237 }
238
239 if (!FLAG_incremental_marking) {
240 return;
241 }
242
243 if (emit_debug_code()) {
244 lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
245 Check(eq,
246 kWrongAddressOrValuePassedToRecordWrite,
247 map,
248 Operand(at));
249 }
250
251 Label done;
252
253 // A single check of the map's pages interesting flag suffices, since it is
254 // only set during incremental collection, and then it's also guaranteed that
255 // the from object's page's interesting flag is also set. This optimization
256 // relies on the fact that maps can never be in new space.
257 CheckPageFlag(map,
258 map, // Used as scratch.
259 MemoryChunk::kPointersToHereAreInterestingMask,
260 eq,
261 &done);
262
263 Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
264 if (emit_debug_code()) {
265 Label ok;
266 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
267 Branch(&ok, eq, at, Operand(zero_reg));
268 stop("Unaligned cell in write barrier");
269 bind(&ok);
270 }
271
272 // Record the actual write.
273 if (ra_status == kRAHasNotBeenSaved) {
274 push(ra);
275 }
276 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
277 fp_mode);
278 CallStub(&stub);
279 if (ra_status == kRAHasNotBeenSaved) {
280 pop(ra);
281 }
282
283 bind(&done);
284
285 // Count number of write barriers in generated code.
286 isolate()->counters()->write_barriers_static()->Increment();
287 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
288
289 // Clobber clobbered registers when running with the debug-code flag
290 // turned on to provoke errors.
291 if (emit_debug_code()) {
292 li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
293 li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
294 }
295 }
296
297
298 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
299 // The register 'object' contains a heap object pointer. The heap object
300 // tag is shifted away.
301 void MacroAssembler::RecordWrite(
302 Register object,
303 Register address,
304 Register value,
305 RAStatus ra_status,
306 SaveFPRegsMode fp_mode,
307 RememberedSetAction remembered_set_action,
308 SmiCheck smi_check,
309 PointersToHereCheck pointers_to_here_check_for_value) {
310 DCHECK(!AreAliased(object, address, value, t8));
311 DCHECK(!AreAliased(object, address, value, t9));
312
313 if (emit_debug_code()) {
314 lw(at, MemOperand(address));
315 Assert(
316 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
317 }
318
319 if (remembered_set_action == OMIT_REMEMBERED_SET &&
320 !FLAG_incremental_marking) {
321 return;
322 }
323
324 // First, check if a write barrier is even needed. The tests below
325 // catch stores of smis and stores into the young generation.
326 Label done;
327
328 if (smi_check == INLINE_SMI_CHECK) {
329 DCHECK_EQ(0, kSmiTag);
330 JumpIfSmi(value, &done);
331 }
332
333 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
334 CheckPageFlag(value,
335 value, // Used as scratch.
336 MemoryChunk::kPointersToHereAreInterestingMask,
337 eq,
338 &done);
339 }
340 CheckPageFlag(object,
341 value, // Used as scratch.
342 MemoryChunk::kPointersFromHereAreInterestingMask,
343 eq,
344 &done);
345
346 // Record the actual write.
347 if (ra_status == kRAHasNotBeenSaved) {
348 push(ra);
349 }
350 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
351 fp_mode);
352 CallStub(&stub);
353 if (ra_status == kRAHasNotBeenSaved) {
354 pop(ra);
355 }
356
357 bind(&done);
358
359 // Count number of write barriers in generated code.
360 isolate()->counters()->write_barriers_static()->Increment();
361 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
362 value);
363
364 // Clobber clobbered registers when running with the debug-code flag
365 // turned on to provoke errors.
366 if (emit_debug_code()) {
367 li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
368 li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
369 }
370 }
371
372
373 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
374 Register address,
375 Register scratch,
376 SaveFPRegsMode fp_mode,
377 RememberedSetFinalAction and_then) {
378 Label done;
379 if (emit_debug_code()) {
380 Label ok;
381 JumpIfNotInNewSpace(object, scratch, &ok);
382 stop("Remembered set pointer is in new space");
383 bind(&ok);
384 }
385 // Load store buffer top.
386 ExternalReference store_buffer =
387 ExternalReference::store_buffer_top(isolate());
388 li(t8, Operand(store_buffer));
389 lw(scratch, MemOperand(t8));
390 // Store pointer to buffer and increment buffer top.
391 sw(address, MemOperand(scratch));
392 Addu(scratch, scratch, kPointerSize);
393 // Write back new top of buffer.
394 sw(scratch, MemOperand(t8));
395 // Call stub on end of buffer.
396 // Check for end of buffer.
397 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
398 if (and_then == kFallThroughAtEnd) {
399 Branch(&done, eq, t8, Operand(zero_reg));
400 } else {
401 DCHECK(and_then == kReturnAtEnd);
402 Ret(eq, t8, Operand(zero_reg));
403 }
404 push(ra);
405 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
406 CallStub(&store_buffer_overflow);
407 pop(ra);
408 bind(&done);
409 if (and_then == kReturnAtEnd) {
410 Ret();
411 }
412 }
413
414
415 // -----------------------------------------------------------------------------
416 // Allocation support.
417
418
419 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
420 Register scratch,
421 Label* miss) {
422 Label same_contexts;
423
424 DCHECK(!holder_reg.is(scratch));
425 DCHECK(!holder_reg.is(at));
426 DCHECK(!scratch.is(at));
427
428 // Load current lexical context from the stack frame.
429 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
430 // In debug mode, make sure the lexical context is set.
431 #ifdef DEBUG
432 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
433 scratch, Operand(zero_reg));
434 #endif
435
436 // Load the native context of the current context.
437 lw(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
438
439 // Check the context is a native context.
440 if (emit_debug_code()) {
441 push(holder_reg); // Temporarily save holder on the stack.
442 // Read the first word and compare to the native_context_map.
443 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
444 LoadRoot(at, Heap::kNativeContextMapRootIndex);
445 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
446 holder_reg, Operand(at));
447 pop(holder_reg); // Restore holder.
448 }
449
450 // Check if both contexts are the same.
451 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
452 Branch(&same_contexts, eq, scratch, Operand(at));
453
454 // Check the context is a native context.
455 if (emit_debug_code()) {
456 push(holder_reg); // Temporarily save holder on the stack.
457 mov(holder_reg, at); // Move at to its holding place.
458 LoadRoot(at, Heap::kNullValueRootIndex);
459 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
460 holder_reg, Operand(at));
461
462 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
463 LoadRoot(at, Heap::kNativeContextMapRootIndex);
464 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
465 holder_reg, Operand(at));
466 // Restoring at is not needed; it is reloaded below.
467 pop(holder_reg); // Restore holder.
468 // Restore at to holder's context.
469 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
470 }
471
472 // Check that the security token in the calling global object is
473 // compatible with the security token in the receiving global
474 // object.
475 int token_offset = Context::kHeaderSize +
476 Context::SECURITY_TOKEN_INDEX * kPointerSize;
477
478 lw(scratch, FieldMemOperand(scratch, token_offset));
479 lw(at, FieldMemOperand(at, token_offset));
480 Branch(miss, ne, scratch, Operand(at));
481
482 bind(&same_contexts);
483 }
484
485
486 // Compute the hash code from the untagged key. This must be kept in sync with
487 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
488 // code-stub-hydrogen.cc
489 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
490 // First of all we assign the hash seed to scratch.
491 LoadRoot(scratch, Heap::kHashSeedRootIndex);
492 SmiUntag(scratch);
493
494 // Xor original key with a seed.
495 xor_(reg0, reg0, scratch);
496
497 // Compute the hash code from the untagged key. This must be kept in sync
498 // with ComputeIntegerHash in utils.h.
499 //
500 // hash = ~hash + (hash << 15);
501 nor(scratch, reg0, zero_reg);
502 sll(at, reg0, 15);
503 addu(reg0, scratch, at);
504
505 // hash = hash ^ (hash >> 12);
506 srl(at, reg0, 12);
507 xor_(reg0, reg0, at);
508
509 // hash = hash + (hash << 2);
510 sll(at, reg0, 2);
511 addu(reg0, reg0, at);
512
513 // hash = hash ^ (hash >> 4);
514 srl(at, reg0, 4);
515 xor_(reg0, reg0, at);
516
517 // hash = hash * 2057;
518 sll(scratch, reg0, 11);
519 sll(at, reg0, 3);
520 addu(reg0, reg0, at);
521 addu(reg0, reg0, scratch);
522
523 // hash = hash ^ (hash >> 16);
524 srl(at, reg0, 16);
525 xor_(reg0, reg0, at);
526 And(reg0, reg0, Operand(0x3fffffff));
527 }
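// A sketch of the same computation in plain C, assuming it mirrors
// ComputeIntegerHash in utils.h (the instruction sequence above is
// authoritative):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // Emitted above as hash + (hash << 3) + (hash << 11).
//   hash = hash ^ (hash >> 16);
//   hash = hash & 0x3fffffff;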
528
529
530 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
531 Register elements,
532 Register key,
533 Register result,
534 Register reg0,
535 Register reg1,
536 Register reg2) {
537 // Register use:
538 //
539 // elements - holds the slow-case elements of the receiver on entry.
540 // Unchanged unless 'result' is the same register.
541 //
542 // key - holds the smi key on entry.
543 // Unchanged unless 'result' is the same register.
544 //
545 //
546 // result - holds the result on exit if the load succeeded.
547 // Allowed to be the same as 'key' or 'elements'.
548 // Unchanged on bailout so 'key' or 'result' can be used
549 // in further computation.
550 //
551 // Scratch registers:
552 //
553 // reg0 - holds the untagged key on entry and holds the hash once computed.
554 //
555 // reg1 - Used to hold the capacity mask of the dictionary.
556 //
557 // reg2 - Used for the index into the dictionary.
558 // at - Temporary (avoid MacroAssembler instructions also using 'at').
559 Label done;
560
561 GetNumberHash(reg0, reg1);
562
563 // Compute the capacity mask.
564 lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
565 sra(reg1, reg1, kSmiTagSize);
566 Subu(reg1, reg1, Operand(1));
567
568 // Generate an unrolled loop that performs a few probes before giving up.
569 for (int i = 0; i < kNumberDictionaryProbes; i++) {
570 // Use reg2 for index calculations and keep the hash intact in reg0.
571 mov(reg2, reg0);
572 // Compute the masked index: (hash + i + i * i) & mask.
573 if (i > 0) {
574 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
575 }
576 and_(reg2, reg2, reg1);
577
578 // Scale the index by multiplying by the element size.
579 DCHECK(SeededNumberDictionary::kEntrySize == 3);
580 sll(at, reg2, 1); // 2x.
581 addu(reg2, reg2, at); // reg2 = reg2 * 3.
582
583 // Check if the key is identical to the name.
584 sll(at, reg2, kPointerSizeLog2);
585 addu(reg2, elements, at);
586
587 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
588 if (i != kNumberDictionaryProbes - 1) {
589 Branch(&done, eq, key, Operand(at));
590 } else {
591 Branch(miss, ne, key, Operand(at));
592 }
593 }
594
595 bind(&done);
596 // Check that the value is a field property.
597 // reg2: elements + (index * kPointerSize).
598 const int kDetailsOffset =
599 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
600 lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
601 DCHECK_EQ(DATA, 0);
602 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
603 Branch(miss, ne, at, Operand(zero_reg));
604
605 // Get the value at the masked, scaled index and return.
606 const int kValueOffset =
607 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
608 lw(result, FieldMemOperand(reg2, kValueOffset));
609 }
610
611
612 // ---------------------------------------------------------------------------
613 // Instruction macros.
614
615 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
616 if (rt.is_reg()) {
617 addu(rd, rs, rt.rm());
618 } else {
619 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
620 addiu(rd, rs, rt.imm32_);
621 } else {
622 // li handles the relocation.
623 DCHECK(!rs.is(at));
624 li(at, rt);
625 addu(rd, rs, at);
626 }
627 }
628 }
629
630
631 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
632 if (rt.is_reg()) {
633 subu(rd, rs, rt.rm());
634 } else {
635 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
636 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
637 } else {
638 // li handles the relocation.
639 DCHECK(!rs.is(at));
640 li(at, rt);
641 subu(rd, rs, at);
642 }
643 }
644 }
645
646
647 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
648 if (rt.is_reg()) {
649 if (IsMipsArchVariant(kLoongson)) {
650 mult(rs, rt.rm());
651 mflo(rd);
652 } else {
653 mul(rd, rs, rt.rm());
654 }
655 } else {
656 // li handles the relocation.
657 DCHECK(!rs.is(at));
658 li(at, rt);
659 if (IsMipsArchVariant(kLoongson)) {
660 mult(rs, at);
661 mflo(rd);
662 } else {
663 mul(rd, rs, at);
664 }
665 }
666 }
667
668
669 void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
670 Register rs, const Operand& rt) {
671 if (rt.is_reg()) {
672 if (!IsMipsArchVariant(kMips32r6)) {
673 mult(rs, rt.rm());
674 mflo(rd_lo);
675 mfhi(rd_hi);
676 } else {
677 if (rd_lo.is(rs)) {
678 DCHECK(!rd_hi.is(rs));
679 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
680 muh(rd_hi, rs, rt.rm());
681 mul(rd_lo, rs, rt.rm());
682 } else {
683 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
684 mul(rd_lo, rs, rt.rm());
685 muh(rd_hi, rs, rt.rm());
686 }
687 }
688 } else {
689 // li handles the relocation.
690 DCHECK(!rs.is(at));
691 li(at, rt);
692 if (!IsMipsArchVariant(kMips32r6)) {
693 mult(rs, at);
694 mflo(rd_lo);
695 mfhi(rd_hi);
696 } else {
697 if (rd_lo.is(rs)) {
698 DCHECK(!rd_hi.is(rs));
699 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
700 muh(rd_hi, rs, at);
701 mul(rd_lo, rs, at);
702 } else {
703 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
704 mul(rd_lo, rs, at);
705 muh(rd_hi, rs, at);
706 }
707 }
708 }
709 }
710
711
712 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
713 if (rt.is_reg()) {
714 if (!IsMipsArchVariant(kMips32r6)) {
715 mult(rs, rt.rm());
716 mfhi(rd);
717 } else {
718 muh(rd, rs, rt.rm());
719 }
720 } else {
721 // li handles the relocation.
722 DCHECK(!rs.is(at));
723 li(at, rt);
724 if (!IsMipsArchVariant(kMips32r6)) {
725 mult(rs, at);
726 mfhi(rd);
727 } else {
728 muh(rd, rs, at);
729 }
730 }
731 }
732
733
734 void MacroAssembler::Mult(Register rs, const Operand& rt) {
735 if (rt.is_reg()) {
736 mult(rs, rt.rm());
737 } else {
738 // li handles the relocation.
739 DCHECK(!rs.is(at));
740 li(at, rt);
741 mult(rs, at);
742 }
743 }
744
745
746 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
747 if (rt.is_reg()) {
748 if (!IsMipsArchVariant(kMips32r6)) {
749 multu(rs, rt.rm());
750 mfhi(rd);
751 } else {
752 muhu(rd, rs, rt.rm());
753 }
754 } else {
755 // li handles the relocation.
756 DCHECK(!rs.is(at));
757 li(at, rt);
758 if (!IsMipsArchVariant(kMips32r6)) {
759 multu(rs, at);
760 mfhi(rd);
761 } else {
762 muhu(rd, rs, at);
763 }
764 }
765 }
766
767
768 void MacroAssembler::Multu(Register rs, const Operand& rt) {
769 if (rt.is_reg()) {
770 multu(rs, rt.rm());
771 } else {
772 // li handles the relocation.
773 DCHECK(!rs.is(at));
774 li(at, rt);
775 multu(rs, at);
776 }
777 }
778
779
780 void MacroAssembler::Div(Register rs, const Operand& rt) {
781 if (rt.is_reg()) {
782 div(rs, rt.rm());
783 } else {
784 // li handles the relocation.
785 DCHECK(!rs.is(at));
786 li(at, rt);
787 div(rs, at);
788 }
789 }
790
791
792 void MacroAssembler::Div(Register rem, Register res,
793 Register rs, const Operand& rt) {
794 if (rt.is_reg()) {
795 if (!IsMipsArchVariant(kMips32r6)) {
796 div(rs, rt.rm());
797 mflo(res);
798 mfhi(rem);
799 } else {
800 div(res, rs, rt.rm());
801 mod(rem, rs, rt.rm());
802 }
803 } else {
804 // li handles the relocation.
805 DCHECK(!rs.is(at));
806 li(at, rt);
807 if (!IsMipsArchVariant(kMips32r6)) {
808 div(rs, at);
809 mflo(res);
810 mfhi(rem);
811 } else {
812 div(res, rs, at);
813 mod(rem, rs, at);
814 }
815 }
816 }
817
818
819 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
820 if (rt.is_reg()) {
821 if (!IsMipsArchVariant(kMips32r6)) {
822 div(rs, rt.rm());
823 mflo(res);
824 } else {
825 div(res, rs, rt.rm());
826 }
827 } else {
828 // li handles the relocation.
829 DCHECK(!rs.is(at));
830 li(at, rt);
831 if (!IsMipsArchVariant(kMips32r6)) {
832 div(rs, at);
833 mflo(res);
834 } else {
835 div(res, rs, at);
836 }
837 }
838 }
839
840
841 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
842 if (rt.is_reg()) {
843 if (!IsMipsArchVariant(kMips32r6)) {
844 div(rs, rt.rm());
845 mfhi(rd);
846 } else {
847 mod(rd, rs, rt.rm());
848 }
849 } else {
850 // li handles the relocation.
851 DCHECK(!rs.is(at));
852 li(at, rt);
853 if (!IsMipsArchVariant(kMips32r6)) {
854 div(rs, at);
855 mfhi(rd);
856 } else {
857 mod(rd, rs, at);
858 }
859 }
860 }
861
862
863 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
864 if (rt.is_reg()) {
865 if (!IsMipsArchVariant(kMips32r6)) {
866 divu(rs, rt.rm());
867 mfhi(rd);
868 } else {
869 modu(rd, rs, rt.rm());
870 }
871 } else {
872 // li handles the relocation.
873 DCHECK(!rs.is(at));
874 li(at, rt);
875 if (!IsMipsArchVariant(kMips32r6)) {
876 divu(rs, at);
877 mfhi(rd);
878 } else {
879 modu(rd, rs, at);
880 }
881 }
882 }
883
884
885 void MacroAssembler::Divu(Register rs, const Operand& rt) {
886 if (rt.is_reg()) {
887 divu(rs, rt.rm());
888 } else {
889 // li handles the relocation.
890 DCHECK(!rs.is(at));
891 li(at, rt);
892 divu(rs, at);
893 }
894 }
895
896
897 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
898 if (rt.is_reg()) {
899 if (!IsMipsArchVariant(kMips32r6)) {
900 divu(rs, rt.rm());
901 mflo(res);
902 } else {
903 divu(res, rs, rt.rm());
904 }
905 } else {
906 // li handles the relocation.
907 DCHECK(!rs.is(at));
908 li(at, rt);
909 if (!IsMipsArchVariant(kMips32r6)) {
910 divu(rs, at);
911 mflo(res);
912 } else {
913 divu(res, rs, at);
914 }
915 }
916 }
917
918
919 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
920 if (rt.is_reg()) {
921 and_(rd, rs, rt.rm());
922 } else {
923 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
924 andi(rd, rs, rt.imm32_);
925 } else {
926 // li handles the relocation.
927 DCHECK(!rs.is(at));
928 li(at, rt);
929 and_(rd, rs, at);
930 }
931 }
932 }
933
934
935 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
936 if (rt.is_reg()) {
937 or_(rd, rs, rt.rm());
938 } else {
939 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
940 ori(rd, rs, rt.imm32_);
941 } else {
942 // li handles the relocation.
943 DCHECK(!rs.is(at));
944 li(at, rt);
945 or_(rd, rs, at);
946 }
947 }
948 }
949
950
951 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
952 if (rt.is_reg()) {
953 xor_(rd, rs, rt.rm());
954 } else {
955 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
956 xori(rd, rs, rt.imm32_);
957 } else {
958 // li handles the relocation.
959 DCHECK(!rs.is(at));
960 li(at, rt);
961 xor_(rd, rs, at);
962 }
963 }
964 }
965
966
967 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
968 if (rt.is_reg()) {
969 nor(rd, rs, rt.rm());
970 } else {
971 // li handles the relocation.
972 DCHECK(!rs.is(at));
973 li(at, rt);
974 nor(rd, rs, at);
975 }
976 }
977
978
979 void MacroAssembler::Neg(Register rs, const Operand& rt) {
980 DCHECK(rt.is_reg());
981 DCHECK(!at.is(rs));
982 DCHECK(!at.is(rt.rm()));
983 li(at, -1);
984 xor_(rs, rt.rm(), at);
985 }
986
987
988 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
989 if (rt.is_reg()) {
990 slt(rd, rs, rt.rm());
991 } else {
992 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
993 slti(rd, rs, rt.imm32_);
994 } else {
995 // li handles the relocation.
996 DCHECK(!rs.is(at));
997 li(at, rt);
998 slt(rd, rs, at);
999 }
1000 }
1001 }
1002
1003
1004 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1005 if (rt.is_reg()) {
1006 sltu(rd, rs, rt.rm());
1007 } else {
1008 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
1009 sltiu(rd, rs, rt.imm32_);
1010 } else {
1011 // li handles the relocation.
1012 DCHECK(!rs.is(at));
1013 li(at, rt);
1014 sltu(rd, rs, at);
1015 }
1016 }
1017 }
1018
1019
1020 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1021 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1022 if (rt.is_reg()) {
1023 rotrv(rd, rs, rt.rm());
1024 } else {
1025 rotr(rd, rs, rt.imm32_);
1026 }
1027 } else {
1028 if (rt.is_reg()) {
1029 subu(at, zero_reg, rt.rm());
1030 sllv(at, rs, at);
1031 srlv(rd, rs, rt.rm());
1032 or_(rd, rd, at);
1033 } else {
1034 if (rt.imm32_ == 0) {
1035 srl(rd, rs, 0);
1036 } else {
1037 srl(at, rs, rt.imm32_);
1038 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
1039 or_(rd, rd, at);
1040 }
1041 }
1042 }
1043 }
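// Illustration of the pre-r2 fallback above for a constant rotate amount n
// in 1..31 (assumed example values):
//   srl(at, rs, n);
//   sll(rd, rs, 32 - n);
//   or_(rd, rd, at);
// e.g. rotating 0x80000001 right by 1 produces 0xC0000000.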
1044
1045
1046 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1047 if (IsMipsArchVariant(kLoongson)) {
1048 lw(zero_reg, rs);
1049 } else {
1050 pref(hint, rs);
1051 }
1052 }
1053
1054
1055 void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1056 Register scratch) {
1057 if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
1058 lsa(rd, rt, rs, sa);
1059 } else {
1060 Register tmp = rd.is(rt) ? scratch : rd;
1061 DCHECK(!tmp.is(rt));
1062 sll(tmp, rs, sa);
1063 Addu(rd, rt, tmp);
1064 }
1065 }
1066
1067
1068 // ------------Pseudo-instructions-------------
1069
1070 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1071 lwr(rd, rs);
1072 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1073 }
1074
1075
1076 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1077 swr(rd, rs);
1078 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1079 }
1080
1081
1082 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1083 AllowDeferredHandleDereference smi_check;
1084 if (value->IsSmi()) {
1085 li(dst, Operand(value), mode);
1086 } else {
1087 DCHECK(value->IsHeapObject());
1088 if (isolate()->heap()->InNewSpace(*value)) {
1089 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1090 li(dst, Operand(cell));
1091 lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
1092 } else {
1093 li(dst, Operand(value));
1094 }
1095 }
1096 }
1097
1098
1099 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1100 DCHECK(!j.is_reg());
1101 BlockTrampolinePoolScope block_trampoline_pool(this);
1102 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1103 // Normal load of an immediate value which does not need Relocation Info.
1104 if (is_int16(j.imm32_)) {
1105 addiu(rd, zero_reg, j.imm32_);
1106 } else if (!(j.imm32_ & kHiMask)) {
1107 ori(rd, zero_reg, j.imm32_);
1108 } else if (!(j.imm32_ & kImm16Mask)) {
1109 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1110 } else {
1111 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1112 ori(rd, rd, (j.imm32_ & kImm16Mask));
1113 }
1114 } else {
1115 if (MustUseReg(j.rmode_)) {
1116 RecordRelocInfo(j.rmode_, j.imm32_);
1117 }
1118 // We always need the same number of instructions as we may need to patch
1119 // this code to load another value which may need 2 instructions to load.
1120 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1121 ori(rd, rd, (j.imm32_ & kImm16Mask));
1122 }
1123 }
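// Assumed examples of what the OPTIMIZE_SIZE path above emits for immediates
// that carry no relocation info:
//   li(rd, 0x00000042)  ->  addiu rd, zero_reg, 0x42          (1 instruction)
//   li(rd, 0x0000beef)  ->  ori   rd, zero_reg, 0xbeef        (1 instruction)
//   li(rd, 0x12340000)  ->  lui   rd, 0x1234                  (1 instruction)
//   li(rd, 0x12345678)  ->  lui   rd, 0x1234; ori rd, rd, 0x5678
// Any other mode always emits the two-instruction lui/ori form so the
// sequence can later be patched with a different value.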
1124
1125
1126 void MacroAssembler::MultiPush(RegList regs) {
1127 int16_t num_to_push = NumberOfBitsSet(regs);
1128 int16_t stack_offset = num_to_push * kPointerSize;
1129
1130 Subu(sp, sp, Operand(stack_offset));
1131 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1132 if ((regs & (1 << i)) != 0) {
1133 stack_offset -= kPointerSize;
1134 sw(ToRegister(i), MemOperand(sp, stack_offset));
1135 }
1136 }
1137 }
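// Illustration of the resulting layout, assuming regs covers a0, a1 and ra:
// sp is lowered by 3 * kPointerSize and the registers are stored as
//   sp + 8 : ra   (highest code, pushed first)
//   sp + 4 : a1
//   sp + 0 : a0   (lowest code, closest to sp)
// which matches the ordering described in SafepointRegisterStackIndex().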
1138
1139
1140 void MacroAssembler::MultiPushReversed(RegList regs) {
1141 int16_t num_to_push = NumberOfBitsSet(regs);
1142 int16_t stack_offset = num_to_push * kPointerSize;
1143
1144 Subu(sp, sp, Operand(stack_offset));
1145 for (int16_t i = 0; i < kNumRegisters; i++) {
1146 if ((regs & (1 << i)) != 0) {
1147 stack_offset -= kPointerSize;
1148 sw(ToRegister(i), MemOperand(sp, stack_offset));
1149 }
1150 }
1151 }
1152
1153
1154 void MacroAssembler::MultiPop(RegList regs) {
1155 int16_t stack_offset = 0;
1156
1157 for (int16_t i = 0; i < kNumRegisters; i++) {
1158 if ((regs & (1 << i)) != 0) {
1159 lw(ToRegister(i), MemOperand(sp, stack_offset));
1160 stack_offset += kPointerSize;
1161 }
1162 }
1163 addiu(sp, sp, stack_offset);
1164 }
1165
1166
1167 void MacroAssembler::MultiPopReversed(RegList regs) {
1168 int16_t stack_offset = 0;
1169
1170 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1171 if ((regs & (1 << i)) != 0) {
1172 lw(ToRegister(i), MemOperand(sp, stack_offset));
1173 stack_offset += kPointerSize;
1174 }
1175 }
1176 addiu(sp, sp, stack_offset);
1177 }
1178
1179
1180 void MacroAssembler::MultiPushFPU(RegList regs) {
1181 int16_t num_to_push = NumberOfBitsSet(regs);
1182 int16_t stack_offset = num_to_push * kDoubleSize;
1183
1184 Subu(sp, sp, Operand(stack_offset));
1185 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1186 if ((regs & (1 << i)) != 0) {
1187 stack_offset -= kDoubleSize;
1188 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1189 }
1190 }
1191 }
1192
1193
1194 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1195 int16_t num_to_push = NumberOfBitsSet(regs);
1196 int16_t stack_offset = num_to_push * kDoubleSize;
1197
1198 Subu(sp, sp, Operand(stack_offset));
1199 for (int16_t i = 0; i < kNumRegisters; i++) {
1200 if ((regs & (1 << i)) != 0) {
1201 stack_offset -= kDoubleSize;
1202 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1203 }
1204 }
1205 }
1206
1207
1208 void MacroAssembler::MultiPopFPU(RegList regs) {
1209 int16_t stack_offset = 0;
1210
1211 for (int16_t i = 0; i < kNumRegisters; i++) {
1212 if ((regs & (1 << i)) != 0) {
1213 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1214 stack_offset += kDoubleSize;
1215 }
1216 }
1217 addiu(sp, sp, stack_offset);
1218 }
1219
1220
1221 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1222 int16_t stack_offset = 0;
1223
1224 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1225 if ((regs & (1 << i)) != 0) {
1226 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1227 stack_offset += kDoubleSize;
1228 }
1229 }
1230 addiu(sp, sp, stack_offset);
1231 }
1232
1233
1234 void MacroAssembler::Ext(Register rt,
1235 Register rs,
1236 uint16_t pos,
1237 uint16_t size) {
1238 DCHECK(pos < 32);
1239 DCHECK(pos + size < 33);
1240
1241 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1242 ext_(rt, rs, pos, size);
1243 } else {
1244 // Move rs to rt and shift it left then right to get the
1245 // desired bitfield on the right side and zeroes on the left.
1246 int shift_left = 32 - (pos + size);
1247 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1248
1249 int shift_right = 32 - size;
1250 if (shift_right > 0) {
1251 srl(rt, rt, shift_right);
1252 }
1253 }
1254 }
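// Sketch of the fallback above in C terms (logical shifts on uint32_t):
//   rt = (rs << (32 - pos - size)) >> (32 - size);
// i.e. bits [pos, pos + size) of rs end up right-aligned in rt with zeroes
// above them; e.g. pos = 4, size = 8 extracts bits 4..11.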
1255
1256
1257 void MacroAssembler::Ins(Register rt,
1258 Register rs,
1259 uint16_t pos,
1260 uint16_t size) {
1261 DCHECK(pos < 32);
1262 DCHECK(pos + size <= 32);
1263 DCHECK(size != 0);
1264
1265 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1266 ins_(rt, rs, pos, size);
1267 } else {
1268 DCHECK(!rt.is(t8) && !rs.is(t8));
1269 Subu(at, zero_reg, Operand(1));
1270 srl(at, at, 32 - size);
1271 and_(t8, rs, at);
1272 sll(t8, t8, pos);
1273 sll(at, at, pos);
1274 nor(at, at, zero_reg);
1275 and_(at, rt, at);
1276 or_(rt, t8, at);
1277 }
1278 }
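// Sketch of the fallback above in C terms (uint32_t arithmetic, size < 32):
//   mask = ((1u << size) - 1) << pos;
//   rt   = (rt & ~mask) | ((rs << pos) & mask);
// i.e. bits [pos, pos + size) of rt are replaced with the low 'size' bits
// of rs; all other bits of rt are preserved.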
1279
1280
1281 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
1282 FPURegister scratch) {
1283 // In FP64Mode we do the conversion from long.
1284 if (IsFp64Mode()) {
1285 mtc1(rs, scratch);
1286 Mthc1(zero_reg, scratch);
1287 cvt_d_l(fd, scratch);
1288 } else {
1289 // Convert rs to a FP value in fd.
1290 DCHECK(!fd.is(scratch));
1291 DCHECK(!rs.is(at));
1292
1293 Label msb_clear, conversion_done;
1294 // For a value which is < 2^31, regard it as a signed positive word.
1295 Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT);
1296 mtc1(rs, fd);
1297
1298 li(at, 0x41F00000); // FP value: 2^32.
1299
1300 // For unsigned inputs > 2^31, we convert to double as a signed int32,
1301 // then add 2^32 to move it back to an unsigned value in range 2^31..2^32-1.
1302 mtc1(zero_reg, scratch);
1303 Mthc1(at, scratch);
1304
1305 cvt_d_w(fd, fd);
1306
1307 Branch(USE_DELAY_SLOT, &conversion_done);
1308 add_d(fd, fd, scratch);
1309
1310 bind(&msb_clear);
1311 cvt_d_w(fd, fd);
1312
1313 bind(&conversion_done);
1314 }
1315 }
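// Worked example for the FP32 branch above (assumed input): 0x41F00000 is
// the high word of the IEEE-754 double 2^32. An input of 0xFFFFFFFF is first
// converted as the signed word -1, giving -1.0, and the 2^32 correction then
// yields the expected 4294967295.0.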
1316
1317
1318 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1319 FPURegister fs,
1320 FPURegister scratch) {
1321 Trunc_uw_d(fs, t8, scratch);
1322 mtc1(t8, fd);
1323 }
1324
1325
1326 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1327 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1328 Mfhc1(t8, fs);
1329 trunc_w_d(fd, fs);
1330 Mthc1(t8, fs);
1331 } else {
1332 trunc_w_d(fd, fs);
1333 }
1334 }
1335
1336
1337 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1338 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1339 Mfhc1(t8, fs);
1340 round_w_d(fd, fs);
1341 Mthc1(t8, fs);
1342 } else {
1343 round_w_d(fd, fs);
1344 }
1345 }
1346
1347
1348 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1349 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1350 Mfhc1(t8, fs);
1351 floor_w_d(fd, fs);
1352 Mthc1(t8, fs);
1353 } else {
1354 floor_w_d(fd, fs);
1355 }
1356 }
1357
1358
1359 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1360 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1361 Mfhc1(t8, fs);
1362 ceil_w_d(fd, fs);
1363 Mthc1(t8, fs);
1364 } else {
1365 ceil_w_d(fd, fs);
1366 }
1367 }
1368
1369
1370 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1371 Register rs,
1372 FPURegister scratch) {
1373 DCHECK(!fd.is(scratch));
1374 DCHECK(!rs.is(at));
1375
1376 // Load 2^31 into scratch as its float representation.
1377 li(at, 0x41E00000);
1378 mtc1(zero_reg, scratch);
1379 Mthc1(at, scratch);
1380 // Test if scratch > fd.
1381 // If fd < 2^31 we can convert it normally.
1382 Label simple_convert;
1383 BranchF(&simple_convert, NULL, lt, fd, scratch);
1384
1385 // First we subtract 2^31 from fd, then trunc it to rs
1386 // and add 2^31 to rs.
1387 sub_d(scratch, fd, scratch);
1388 trunc_w_d(scratch, scratch);
1389 mfc1(rs, scratch);
1390 Or(rs, rs, 1 << 31);
1391
1392 Label done;
1393 Branch(&done);
1394 // Simple conversion.
1395 bind(&simple_convert);
1396 trunc_w_d(scratch, fd);
1397 mfc1(rs, scratch);
1398
1399 bind(&done);
1400 }
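// Worked example for the path above (assumed input): 0x41E00000 is the high
// word of the IEEE-754 double 2^31. For an input of 3.0e9 (>= 2^31) the code
// truncates 3.0e9 - 2^31 = 852516352.0 to an int32 and ORs in 1 << 31,
// reconstructing the unsigned result 3000000000.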
1401
1402
1403 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
1404 if (IsFp64Mode()) {
1405 mthc1(rt, fs);
1406 } else {
1407 mtc1(rt, fs.high());
1408 }
1409 }
1410
1411
1412 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
1413 if (IsFp64Mode()) {
1414 mfhc1(rt, fs);
1415 } else {
1416 mfc1(rt, fs.high());
1417 }
1418 }
1419
1420
1421 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
1422 Label* nan, Condition cond, FPURegister cmp1,
1423 FPURegister cmp2, BranchDelaySlot bd) {
1424 {
1425 BlockTrampolinePoolScope block_trampoline_pool(this);
1426 if (cond == al) {
1427 Branch(bd, target);
1428 return;
1429 }
1430
1431 if (IsMipsArchVariant(kMips32r6)) {
1432 sizeField = sizeField == D ? L : W;
1433 }
1434 DCHECK(nan || target);
1435 // Check for unordered (NaN) cases.
1436 if (nan) {
1437 bool long_branch =
1438 nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
1439 if (!IsMipsArchVariant(kMips32r6)) {
1440 if (long_branch) {
1441 Label skip;
1442 c(UN, sizeField, cmp1, cmp2);
1443 bc1f(&skip);
1444 nop();
1445 BranchLong(nan, bd);
1446 bind(&skip);
1447 } else {
1448 c(UN, sizeField, cmp1, cmp2);
1449 bc1t(nan);
1450 if (bd == PROTECT) {
1451 nop();
1452 }
1453 }
1454 } else {
1455 // Use kDoubleCompareReg for the comparison result. It has to be unavailable
1456 // to the Lithium register allocator.
1457 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1458 if (long_branch) {
1459 Label skip;
1460 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
1461 bc1eqz(&skip, kDoubleCompareReg);
1462 nop();
1463 BranchLong(nan, bd);
1464 bind(&skip);
1465 } else {
1466 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
1467 bc1nez(nan, kDoubleCompareReg);
1468 if (bd == PROTECT) {
1469 nop();
1470 }
1471 }
1472 }
1473 }
1474
1475 if (target) {
1476 bool long_branch =
1477 target->is_bound() ? is_near(target) : is_trampoline_emitted();
1478 if (long_branch) {
1479 Label skip;
1480 Condition neg_cond = NegateFpuCondition(cond);
1481 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
1482 BranchLong(target, bd);
1483 bind(&skip);
1484 } else {
1485 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
1486 }
1487 }
1488 }
1489 }
1490
1491 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
1492 Condition cc, FPURegister cmp1,
1493 FPURegister cmp2, BranchDelaySlot bd) {
1494 if (!IsMipsArchVariant(kMips32r6)) {
1495 BlockTrampolinePoolScope block_trampoline_pool(this);
1496 if (target) {
1497 // Here NaN cases were either handled by this function or are assumed to
1498 // have been handled by the caller.
1499 switch (cc) {
1500 case lt:
1501 c(OLT, sizeField, cmp1, cmp2);
1502 bc1t(target);
1503 break;
1504 case ult:
1505 c(ULT, sizeField, cmp1, cmp2);
1506 bc1t(target);
1507 break;
1508 case gt:
1509 c(ULE, sizeField, cmp1, cmp2);
1510 bc1f(target);
1511 break;
1512 case ugt:
1513 c(OLE, sizeField, cmp1, cmp2);
1514 bc1f(target);
1515 break;
1516 case ge:
1517 c(ULT, sizeField, cmp1, cmp2);
1518 bc1f(target);
1519 break;
1520 case uge:
1521 c(OLT, sizeField, cmp1, cmp2);
1522 bc1f(target);
1523 break;
1524 case le:
1525 c(OLE, sizeField, cmp1, cmp2);
1526 bc1t(target);
1527 break;
1528 case ule:
1529 c(ULE, sizeField, cmp1, cmp2);
1530 bc1t(target);
1531 break;
1532 case eq:
1533 c(EQ, sizeField, cmp1, cmp2);
1534 bc1t(target);
1535 break;
1536 case ueq:
1537 c(UEQ, sizeField, cmp1, cmp2);
1538 bc1t(target);
1539 break;
1540 case ne: // Unordered or not equal.
1541 c(EQ, sizeField, cmp1, cmp2);
1542 bc1f(target);
1543 break;
1544 case ogl:
1545 c(UEQ, sizeField, cmp1, cmp2);
1546 bc1f(target);
1547 break;
1548 default:
1549 CHECK(0);
1550 }
1551 }
1552 } else {
1553 BlockTrampolinePoolScope block_trampoline_pool(this);
1554 if (target) {
1555 // Here NaN cases were either handled by this function or are assumed to
1556 // have been handled by the caller.
1557 // Unsigned conditions are treated as their signed counterpart.
1558 // Use kDoubleCompareReg for the comparison result; it is
1559 // valid in fp64 (FR = 1) mode, which is implied for mips32r6.
1560 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1561 switch (cc) {
1562 case lt:
1563 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1564 bc1nez(target, kDoubleCompareReg);
1565 break;
1566 case ult:
1567 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1568 bc1nez(target, kDoubleCompareReg);
1569 break;
1570 case gt:
1571 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1572 bc1eqz(target, kDoubleCompareReg);
1573 break;
1574 case ugt:
1575 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1576 bc1eqz(target, kDoubleCompareReg);
1577 break;
1578 case ge:
1579 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1580 bc1eqz(target, kDoubleCompareReg);
1581 break;
1582 case uge:
1583 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1584 bc1eqz(target, kDoubleCompareReg);
1585 break;
1586 case le:
1587 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1588 bc1nez(target, kDoubleCompareReg);
1589 break;
1590 case ule:
1591 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1592 bc1nez(target, kDoubleCompareReg);
1593 break;
1594 case eq:
1595 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1596 bc1nez(target, kDoubleCompareReg);
1597 break;
1598 case ueq:
1599 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1600 bc1nez(target, kDoubleCompareReg);
1601 break;
1602 case ne:
1603 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1604 bc1eqz(target, kDoubleCompareReg);
1605 break;
1606 case ogl:
1607 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1608 bc1eqz(target, kDoubleCompareReg);
1609 break;
1610 default:
1611 CHECK(0);
1612 }
1613 }
1614 }
1615 if (bd == PROTECT) {
1616 nop();
1617 }
1618 }
1619
1620
1621 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
1622 if (IsFp64Mode()) {
1623 DCHECK(!src_low.is(at));
1624 mfhc1(at, dst);
1625 mtc1(src_low, dst);
1626 mthc1(at, dst);
1627 } else {
1628 mtc1(src_low, dst);
1629 }
1630 }
1631
1632
1633 void MacroAssembler::Move(FPURegister dst, float imm) {
1634 li(at, Operand(bit_cast<int32_t>(imm)));
1635 mtc1(at, dst);
1636 }
1637
1638
1639 void MacroAssembler::Move(FPURegister dst, double imm) {
1640 static const DoubleRepresentation minus_zero(-0.0);
1641 static const DoubleRepresentation zero(0.0);
1642 DoubleRepresentation value_rep(imm);
1643 // Handle special values first.
1644 if (value_rep == zero && has_double_zero_reg_set_) {
1645 mov_d(dst, kDoubleRegZero);
1646 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
1647 neg_d(dst, kDoubleRegZero);
1648 } else {
1649 uint32_t lo, hi;
1650 DoubleAsTwoUInt32(imm, &lo, &hi);
1651 // Move the low part of the double into the lower of the corresponding FPU
1652 // register of FPU register pair.
1653 if (lo != 0) {
1654 li(at, Operand(lo));
1655 mtc1(at, dst);
1656 } else {
1657 mtc1(zero_reg, dst);
1658 }
1659 // Move the high part of the double into the higher of the corresponding FPU
1660 // register of FPU register pair.
1661 if (hi != 0) {
1662 li(at, Operand(hi));
1663 Mthc1(at, dst);
1664 } else {
1665 Mthc1(zero_reg, dst);
1666 }
1667 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
1668 }
1669 }
1670
1671
1672 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1673 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1674 Label done;
1675 Branch(&done, ne, rt, Operand(zero_reg));
1676 mov(rd, rs);
1677 bind(&done);
1678 } else {
1679 movz(rd, rs, rt);
1680 }
1681 }
1682
1683
1684 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1685 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1686 Label done;
1687 Branch(&done, eq, rt, Operand(zero_reg));
1688 mov(rd, rs);
1689 bind(&done);
1690 } else {
1691 movn(rd, rs, rt);
1692 }
1693 }
1694
1695
1696 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1697 if (IsMipsArchVariant(kLoongson)) {
1698 // Tests an FP condition code and then conditionally move rs to rd.
1699 // We do not currently use any FPU cc bit other than bit 0.
1700 DCHECK(cc == 0);
1701 DCHECK(!(rs.is(t8) || rd.is(t8)));
1702 Label done;
1703 Register scratch = t8;
1704 // For testing purposes we need to fetch content of the FCSR register and
1705 // then test its cc (floating point condition code) bit (for cc = 0, it is
1706 // the 24th bit of the FCSR).
1707 cfc1(scratch, FCSR);
1708 // For the MIPS I, II and III architectures, the contents of scratch are
1709 // UNPREDICTABLE for the instruction immediately following CFC1.
1710 nop();
1711 srl(scratch, scratch, 16);
1712 andi(scratch, scratch, 0x0080);
1713 Branch(&done, eq, scratch, Operand(zero_reg));
1714 mov(rd, rs);
1715 bind(&done);
1716 } else {
1717 movt(rd, rs, cc);
1718 }
1719 }
1720
1721
1722 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1723 if (IsMipsArchVariant(kLoongson)) {
1724 // Tests an FP condition code and then conditionally move rs to rd.
1725 // We do not currently use any FPU cc bit other than bit 0.
1726 DCHECK(cc == 0);
1727 DCHECK(!(rs.is(t8) || rd.is(t8)));
1728 Label done;
1729 Register scratch = t8;
1730 // For testing purposes we need to fetch content of the FCSR register and
1731 // then test its cc (floating point condition code) bit (for cc = 0, it is
1732 // the 24th bit of the FCSR).
1733 cfc1(scratch, FCSR);
1734 // For the MIPS I, II and III architectures, the contents of scratch are
1735 // UNPREDICTABLE for the instruction immediately following CFC1.
1736 nop();
1737 srl(scratch, scratch, 16);
1738 andi(scratch, scratch, 0x0080);
1739 Branch(&done, ne, scratch, Operand(zero_reg));
1740 mov(rd, rs);
1741 bind(&done);
1742 } else {
1743 movf(rd, rs, cc);
1744 }
1745 }
1746
1747
1748 void MacroAssembler::Clz(Register rd, Register rs) {
1749 if (IsMipsArchVariant(kLoongson)) {
1750 DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1751 Register mask = t8;
1752 Register scratch = t9;
1753 Label loop, end;
1754 mov(at, rs);
1755 mov(rd, zero_reg);
1756 lui(mask, 0x8000);
1757 bind(&loop);
1758 and_(scratch, at, mask);
1759 Branch(&end, ne, scratch, Operand(zero_reg));
1760 addiu(rd, rd, 1);
1761 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1762 srl(mask, mask, 1);
1763 bind(&end);
1764 } else {
1765 clz(rd, rs);
1766 }
1767 }
1768
1769
1770 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1771 Register result,
1772 DoubleRegister double_input,
1773 Register scratch,
1774 DoubleRegister double_scratch,
1775 Register except_flag,
1776 CheckForInexactConversion check_inexact) {
1777 DCHECK(!result.is(scratch));
1778 DCHECK(!double_input.is(double_scratch));
1779 DCHECK(!except_flag.is(scratch));
1780
1781 Label done;
1782
1783 // Clear the except flag (0 = no exception)
1784 mov(except_flag, zero_reg);
1785
1786 // Test for values that can be exactly represented as a signed 32-bit integer.
1787 cvt_w_d(double_scratch, double_input);
1788 mfc1(result, double_scratch);
1789 cvt_d_w(double_scratch, double_scratch);
1790 BranchF(&done, NULL, eq, double_input, double_scratch);
1791
1792 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1793
1794 if (check_inexact == kDontCheckForInexactConversion) {
1795 // Ignore inexact exceptions.
1796 except_mask &= ~kFCSRInexactFlagMask;
1797 }
1798
1799 // Save FCSR.
1800 cfc1(scratch, FCSR);
1801 // Disable FPU exceptions.
1802 ctc1(zero_reg, FCSR);
1803
1804 // Do operation based on rounding mode.
1805 switch (rounding_mode) {
1806 case kRoundToNearest:
1807 Round_w_d(double_scratch, double_input);
1808 break;
1809 case kRoundToZero:
1810 Trunc_w_d(double_scratch, double_input);
1811 break;
1812 case kRoundToPlusInf:
1813 Ceil_w_d(double_scratch, double_input);
1814 break;
1815 case kRoundToMinusInf:
1816 Floor_w_d(double_scratch, double_input);
1817 break;
1818 } // End of switch-statement.
1819
1820 // Retrieve FCSR.
1821 cfc1(except_flag, FCSR);
1822 // Restore FCSR.
1823 ctc1(scratch, FCSR);
1824 // Move the converted value into the result register.
1825 mfc1(result, double_scratch);
1826
1827 // Check for fpu exceptions.
1828 And(except_flag, except_flag, Operand(except_mask));
1829
1830 bind(&done);
1831 }
1832
1833
1834 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1835 DoubleRegister double_input,
1836 Label* done) {
1837 DoubleRegister single_scratch = kLithiumScratchDouble.low();
1838 Register scratch = at;
1839 Register scratch2 = t9;
1840
1841 // Clear cumulative exception flags and save the FCSR.
1842 cfc1(scratch2, FCSR);
1843 ctc1(zero_reg, FCSR);
1844 // Try a conversion to a signed integer.
1845 trunc_w_d(single_scratch, double_input);
1846 mfc1(result, single_scratch);
1847 // Retrieve and restore the FCSR.
1848 cfc1(scratch, FCSR);
1849 ctc1(scratch2, FCSR);
1850 // Check for overflow and NaNs.
1851 And(scratch,
1852 scratch,
1853 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1854 // If we had no exceptions we are done.
1855 Branch(done, eq, scratch, Operand(zero_reg));
1856 }
1857
1858
1859 void MacroAssembler::TruncateDoubleToI(Register result,
1860 DoubleRegister double_input) {
1861 Label done;
1862
1863 TryInlineTruncateDoubleToI(result, double_input, &done);
1864
1865 // If we fell through then inline version didn't succeed - call stub instead.
1866 push(ra);
1867 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1868 sdc1(double_input, MemOperand(sp, 0));
1869
1870 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1871 CallStub(&stub);
1872
1873 Addu(sp, sp, Operand(kDoubleSize));
1874 pop(ra);
1875
1876 bind(&done);
1877 }
1878
1879
1880 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1881 Label done;
1882 DoubleRegister double_scratch = f12;
1883 DCHECK(!result.is(object));
1884
1885 ldc1(double_scratch,
1886 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1887 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1888
1889 // If we fell through, the inline version didn't succeed, so call the stub.
1890 push(ra);
1891 DoubleToIStub stub(isolate(),
1892 object,
1893 result,
1894 HeapNumber::kValueOffset - kHeapObjectTag,
1895 true,
1896 true);
1897 CallStub(&stub);
1898 pop(ra);
1899
1900 bind(&done);
1901 }
1902
1903
1904 void MacroAssembler::TruncateNumberToI(Register object,
1905 Register result,
1906 Register heap_number_map,
1907 Register scratch,
1908 Label* not_number) {
1909 Label done;
1910 DCHECK(!result.is(object));
1911
1912 UntagAndJumpIfSmi(result, object, &done);
1913 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1914 TruncateHeapNumberToI(result, object);
1915
1916 bind(&done);
1917 }
1918
1919
1920 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1921 Register src,
1922 int num_least_bits) {
1923 Ext(dst, src, kSmiTagSize, num_least_bits);
1924 }
1925
1926
1927 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1928 Register src,
1929 int num_least_bits) {
1930 And(dst, src, Operand((1 << num_least_bits) - 1));
1931 }
1932
1933
1934 // Emulated conditional branches do not emit a nop in the branch delay slot.
1935 //
1936 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1937 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
1938 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1939 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
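// In other words, an unconditional branch must pass zero_reg for both rs and
// rt, while a conditional branch must involve at least one register other
// than zero_reg.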
1940
1941
1942 void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
1943 DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
1944 BranchShort(offset, bdslot);
1945 }
1946
1947
1948 void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
1949 const Operand& rt, BranchDelaySlot bdslot) {
1950 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
1951 DCHECK(is_near);
1952 USE(is_near);
1953 }
1954
1955
1956 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1957 if (L->is_bound()) {
1958 if (is_near_branch(L)) {
1959 BranchShort(L, bdslot);
1960 } else {
1961 BranchLong(L, bdslot);
1962 }
1963 } else {
1964 if (is_trampoline_emitted()) {
1965 BranchLong(L, bdslot);
1966 } else {
1967 BranchShort(L, bdslot);
1968 }
1969 }
1970 }
1971
1972
1973 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1974 const Operand& rt,
1975 BranchDelaySlot bdslot) {
1976 if (L->is_bound()) {
1977 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
1978 if (cond != cc_always) {
1979 Label skip;
1980 Condition neg_cond = NegateCondition(cond);
1981 BranchShort(&skip, neg_cond, rs, rt);
1982 BranchLong(L, bdslot);
1983 bind(&skip);
1984 } else {
1985 BranchLong(L, bdslot);
1986 }
1987 }
1988 } else {
1989 if (is_trampoline_emitted()) {
1990 if (cond != cc_always) {
1991 Label skip;
1992 Condition neg_cond = NegateCondition(cond);
1993 BranchShort(&skip, neg_cond, rs, rt);
1994 BranchLong(L, bdslot);
1995 bind(&skip);
1996 } else {
1997 BranchLong(L, bdslot);
1998 }
1999 } else {
2000 BranchShort(L, cond, rs, rt, bdslot);
2001 }
2002 }
2003 }
2004
2005
2006 void MacroAssembler::Branch(Label* L,
2007 Condition cond,
2008 Register rs,
2009 Heap::RootListIndex index,
2010 BranchDelaySlot bdslot) {
2011 LoadRoot(at, index);
2012 Branch(L, cond, rs, Operand(at), bdslot);
2013 }
2014
2015
2016 void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2017 BranchDelaySlot bdslot) {
2018 DCHECK(L == nullptr || offset == 0);
2019 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2020 b(offset);
2021
2022 // Emit a nop in the branch delay slot if required.
2023 if (bdslot == PROTECT)
2024 nop();
2025 }
2026
2027
2028 void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2029 DCHECK(L == nullptr || offset == 0);
2030 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2031 bc(offset);
2032 }
2033
2034
2035 void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2036 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2037 DCHECK(is_int26(offset));
2038 BranchShortHelperR6(offset, nullptr);
2039 } else {
2040 DCHECK(is_int16(offset));
2041 BranchShortHelper(offset, nullptr, bdslot);
2042 }
2043 }
2044
2045
2046 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2047 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2048 BranchShortHelperR6(0, L);
2049 } else {
2050 BranchShortHelper(0, L, bdslot);
2051 }
2052 }
2053
2054
2055 static inline bool IsZero(const Operand& rt) {
2056 if (rt.is_reg()) {
2057 return rt.rm().is(zero_reg);
2058 } else {
2059 return rt.immediate() == 0;
2060 }
2061 }
2062
2063
2064 int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2065 if (L) {
2066 offset = branch_offset_helper(L, bits) >> 2;
2067 } else {
2068 DCHECK(is_intn(offset, bits));
2069 }
2070 return offset;
2071 }
2072
2073
2074 Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
2075 Register scratch) {
2076 Register r2 = no_reg;
2077 if (rt.is_reg()) {
2078 r2 = rt.rm_;
2079 } else {
2080 r2 = scratch;
2081 li(r2, rt);
2082 }
2083
2084 return r2;
2085 }
2086
2087
2088 bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2089 Condition cond, Register rs,
2090 const Operand& rt) {
2091 DCHECK(L == nullptr || offset == 0);
2092 Register scratch = rs.is(at) ? t8 : at;
2093 OffsetSize bits = OffsetSize::kOffset16;
2094
2095 // Be careful to always use shifted_branch_offset only just before the
2096 // branch instruction, as the location will be remembered for patching the
2097 // target.
2098 {
2099 BlockTrampolinePoolScope block_trampoline_pool(this);
2100 switch (cond) {
2101 case cc_always:
2102 bits = OffsetSize::kOffset26;
2103 if (!is_near(L, bits)) return false;
2104 offset = GetOffset(offset, L, bits);
2105 bc(offset);
2106 break;
2107 case eq:
2108 if (rs.code() == rt.rm_.reg_code) {
2109 // Pre-R6 beq is used here to make the code patchable. Otherwise bc would
2110 // be used, which has no condition field and is therefore not patchable.
2111 bits = OffsetSize::kOffset16;
2112 if (!is_near(L, bits)) return false;
2113 scratch = GetRtAsRegisterHelper(rt, scratch);
2114 offset = GetOffset(offset, L, bits);
2115 beq(rs, scratch, offset);
2116 nop();
2117 } else if (IsZero(rt)) {
2118 bits = OffsetSize::kOffset21;
2119 if (!is_near(L, bits)) return false;
2120 offset = GetOffset(offset, L, bits);
2121 beqzc(rs, offset);
2122 } else {
2123 // We don't want any other register but scratch clobbered.
2124 bits = OffsetSize::kOffset16;
2125 if (!is_near(L, bits)) return false;
2126 scratch = GetRtAsRegisterHelper(rt, scratch);
2127 offset = GetOffset(offset, L, bits);
2128 beqc(rs, scratch, offset);
2129 }
2130 break;
2131 case ne:
2132 if (rs.code() == rt.rm_.reg_code) {
2133 // Pre-R6 bne is used here to make the code patchable. Otherwise, since
2134 // rs equals rt here, no instruction would need to be generated.
2135 bits = OffsetSize::kOffset16;
2136 if (!is_near(L, bits)) return false;
2137 scratch = GetRtAsRegisterHelper(rt, scratch);
2138 offset = GetOffset(offset, L, bits);
2139 bne(rs, scratch, offset);
2140 nop();
2141 } else if (IsZero(rt)) {
2142 bits = OffsetSize::kOffset21;
2143 if (!is_near(L, bits)) return false;
2144 offset = GetOffset(offset, L, bits);
2145 bnezc(rs, offset);
2146 } else {
2147 // We don't want any other register but scratch clobbered.
2148 bits = OffsetSize::kOffset16;
2149 if (!is_near(L, bits)) return false;
2150 scratch = GetRtAsRegisterHelper(rt, scratch);
2151 offset = GetOffset(offset, L, bits);
2152 bnec(rs, scratch, offset);
2153 }
2154 break;
2155
2156 // Signed comparison.
2157 case greater:
2158 // rs > rt
2159 if (rs.code() == rt.rm_.reg_code) {
2160 break; // No code needs to be emitted.
2161 } else if (rs.is(zero_reg)) {
2162 bits = OffsetSize::kOffset16;
2163 if (!is_near(L, bits)) return false;
2164 scratch = GetRtAsRegisterHelper(rt, scratch);
2165 offset = GetOffset(offset, L, bits);
2166 bltzc(scratch, offset);
2167 } else if (IsZero(rt)) {
2168 bits = OffsetSize::kOffset16;
2169 if (!is_near(L, bits)) return false;
2170 offset = GetOffset(offset, L, bits);
2171 bgtzc(rs, offset);
2172 } else {
2173 bits = OffsetSize::kOffset16;
2174 if (!is_near(L, bits)) return false;
2175 scratch = GetRtAsRegisterHelper(rt, scratch);
2176 DCHECK(!rs.is(scratch));
2177 offset = GetOffset(offset, L, bits);
2178 bltc(scratch, rs, offset);
2179 }
2180 break;
2181 case greater_equal:
2182 // rs >= rt
2183 if (rs.code() == rt.rm_.reg_code) {
2184 bits = OffsetSize::kOffset26;
2185 if (!is_near(L, bits)) return false;
2186 offset = GetOffset(offset, L, bits);
2187 bc(offset);
2188 } else if (rs.is(zero_reg)) {
2189 bits = OffsetSize::kOffset16;
2190 if (!is_near(L, bits)) return false;
2191 scratch = GetRtAsRegisterHelper(rt, scratch);
2192 offset = GetOffset(offset, L, bits);
2193 blezc(scratch, offset);
2194 } else if (IsZero(rt)) {
2195 bits = OffsetSize::kOffset16;
2196 if (!is_near(L, bits)) return false;
2197 offset = GetOffset(offset, L, bits);
2198 bgezc(rs, offset);
2199 } else {
2200 bits = OffsetSize::kOffset16;
2201 if (!is_near(L, bits)) return false;
2202 scratch = GetRtAsRegisterHelper(rt, scratch);
2203 DCHECK(!rs.is(scratch));
2204 offset = GetOffset(offset, L, bits);
2205 bgec(rs, scratch, offset);
2206 }
2207 break;
2208 case less:
2209 // rs < rt
2210 if (rs.code() == rt.rm_.reg_code) {
2211 break; // No code needs to be emitted.
2212 } else if (rs.is(zero_reg)) {
2213 bits = OffsetSize::kOffset16;
2214 if (!is_near(L, bits)) return false;
2215 scratch = GetRtAsRegisterHelper(rt, scratch);
2216 offset = GetOffset(offset, L, bits);
2217 bgtzc(scratch, offset);
2218 } else if (IsZero(rt)) {
2219 bits = OffsetSize::kOffset16;
2220 if (!is_near(L, bits)) return false;
2221 offset = GetOffset(offset, L, bits);
2222 bltzc(rs, offset);
2223 } else {
2224 bits = OffsetSize::kOffset16;
2225 if (!is_near(L, bits)) return false;
2226 scratch = GetRtAsRegisterHelper(rt, scratch);
2227 DCHECK(!rs.is(scratch));
2228 offset = GetOffset(offset, L, bits);
2229 bltc(rs, scratch, offset);
2230 }
2231 break;
2232 case less_equal:
2233 // rs <= rt
2234 if (rs.code() == rt.rm_.reg_code) {
2235 bits = OffsetSize::kOffset26;
2236 if (!is_near(L, bits)) return false;
2237 offset = GetOffset(offset, L, bits);
2238 bc(offset);
2239 } else if (rs.is(zero_reg)) {
2240 bits = OffsetSize::kOffset16;
2241 if (!is_near(L, bits)) return false;
2242 scratch = GetRtAsRegisterHelper(rt, scratch);
2243 offset = GetOffset(offset, L, bits);
2244 bgezc(scratch, offset);
2245 } else if (IsZero(rt)) {
2246 bits = OffsetSize::kOffset16;
2247 if (!is_near(L, bits)) return false;
2248 offset = GetOffset(offset, L, bits);
2249 blezc(rs, offset);
2250 } else {
2251 bits = OffsetSize::kOffset16;
2252 if (!is_near(L, bits)) return false;
2253 scratch = GetRtAsRegisterHelper(rt, scratch);
2254 DCHECK(!rs.is(scratch));
2255 offset = GetOffset(offset, L, bits);
2256 bgec(scratch, rs, offset);
2257 }
2258 break;
2259
2260 // Unsigned comparison.
2261 case Ugreater:
2262 // rs > rt
2263 if (rs.code() == rt.rm_.reg_code) {
2264 break; // No code needs to be emitted.
2265 } else if (rs.is(zero_reg)) {
2266 bits = OffsetSize::kOffset21;
2267 if (!is_near(L, bits)) return false;
2268 scratch = GetRtAsRegisterHelper(rt, scratch);
2269 offset = GetOffset(offset, L, bits);
2270 bnezc(scratch, offset);
2271 } else if (IsZero(rt)) {
2272 bits = OffsetSize::kOffset21;
2273 if (!is_near(L, bits)) return false;
2274 offset = GetOffset(offset, L, bits);
2275 bnezc(rs, offset);
2276 } else {
2277 bits = OffsetSize::kOffset16;
2278 if (!is_near(L, bits)) return false;
2279 scratch = GetRtAsRegisterHelper(rt, scratch);
2280 DCHECK(!rs.is(scratch));
2281 offset = GetOffset(offset, L, bits);
2282 bltuc(scratch, rs, offset);
2283 }
2284 break;
2285 case Ugreater_equal:
2286 // rs >= rt
2287 if (rs.code() == rt.rm_.reg_code) {
2288 bits = OffsetSize::kOffset26;
2289 if (!is_near(L, bits)) return false;
2290 offset = GetOffset(offset, L, bits);
2291 bc(offset);
2292 } else if (rs.is(zero_reg)) {
2293 bits = OffsetSize::kOffset21;
2294 if (!is_near(L, bits)) return false;
2295 scratch = GetRtAsRegisterHelper(rt, scratch);
2296 offset = GetOffset(offset, L, bits);
2297 beqzc(scratch, offset);
2298 } else if (IsZero(rt)) {
2299 bits = OffsetSize::kOffset26;
2300 if (!is_near(L, bits)) return false;
2301 offset = GetOffset(offset, L, bits);
2302 bc(offset);
2303 } else {
2304 bits = OffsetSize::kOffset16;
2305 if (!is_near(L, bits)) return false;
2306 scratch = GetRtAsRegisterHelper(rt, scratch);
2307 DCHECK(!rs.is(scratch));
2308 offset = GetOffset(offset, L, bits);
2309 bgeuc(rs, scratch, offset);
2310 }
2311 break;
2312 case Uless:
2313 // rs < rt
2314 if (rs.code() == rt.rm_.reg_code) {
2315 break; // No code needs to be emitted.
2316 } else if (rs.is(zero_reg)) {
2317 bits = OffsetSize::kOffset21;
2318 if (!is_near(L, bits)) return false;
2319 scratch = GetRtAsRegisterHelper(rt, scratch);
2320 offset = GetOffset(offset, L, bits);
2321 bnezc(scratch, offset);
2322 } else if (IsZero(rt)) {
2323 break; // No code needs to be emitted.
2324 } else {
2325 bits = OffsetSize::kOffset16;
2326 if (!is_near(L, bits)) return false;
2327 scratch = GetRtAsRegisterHelper(rt, scratch);
2328 DCHECK(!rs.is(scratch));
2329 offset = GetOffset(offset, L, bits);
2330 bltuc(rs, scratch, offset);
2331 }
2332 break;
2333 case Uless_equal:
2334 // rs <= rt
2335 if (rs.code() == rt.rm_.reg_code) {
2336 bits = OffsetSize::kOffset26;
2337 if (!is_near(L, bits)) return false;
2338 offset = GetOffset(offset, L, bits);
2339 bc(offset);
2340 } else if (rs.is(zero_reg)) {
2341 bits = OffsetSize::kOffset26;
2342 if (!is_near(L, bits)) return false;
2343 scratch = GetRtAsRegisterHelper(rt, scratch);
2344 offset = GetOffset(offset, L, bits);
2345 bc(offset);
2346 } else if (IsZero(rt)) {
2347 bits = OffsetSize::kOffset21;
2348 if (!is_near(L, bits)) return false;
2349 offset = GetOffset(offset, L, bits);
2350 beqzc(rs, offset);
2351 } else {
2352 bits = OffsetSize::kOffset16;
2353 if (!is_near(L, bits)) return false;
2354 scratch = GetRtAsRegisterHelper(rt, scratch);
2355 DCHECK(!rs.is(scratch));
2356 offset = GetOffset(offset, L, bits);
2357 bgeuc(scratch, rs, offset);
2358 }
2359 break;
2360 default:
2361 UNREACHABLE();
2362 }
2363 }
2364 CheckTrampolinePoolQuick(1);
2365 return true;
2366 }
2367
2368
2369 bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
2370 Register rs, const Operand& rt,
2371 BranchDelaySlot bdslot) {
2372 DCHECK(L == nullptr || offset == 0);
2373 if (!is_near(L, OffsetSize::kOffset16)) return false;
2374
2375 Register scratch = at;
2376 int32_t offset32;
2377
2378 // Be careful to always use shifted_branch_offset only just before the
2379 // branch instruction, as the location will be remembered for patching the
2380 // target.
2381 {
2382 BlockTrampolinePoolScope block_trampoline_pool(this);
2383 switch (cond) {
2384 case cc_always:
2385 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2386 b(offset32);
2387 break;
2388 case eq:
2389 if (IsZero(rt)) {
2390 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2391 beq(rs, zero_reg, offset32);
2392 } else {
2393 // We don't want any other register but scratch clobbered.
2394 scratch = GetRtAsRegisterHelper(rt, scratch);
2395 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2396 beq(rs, scratch, offset32);
2397 }
2398 break;
2399 case ne:
2400 if (IsZero(rt)) {
2401 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2402 bne(rs, zero_reg, offset32);
2403 } else {
2404 // We don't want any other register but scratch clobbered.
2405 scratch = GetRtAsRegisterHelper(rt, scratch);
2406 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2407 bne(rs, scratch, offset32);
2408 }
2409 break;
2410
2411 // Signed comparison.
2412 case greater:
2413 if (IsZero(rt)) {
2414 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2415 bgtz(rs, offset32);
2416 } else {
2417 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2418 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2419 bne(scratch, zero_reg, offset32);
2420 }
2421 break;
2422 case greater_equal:
2423 if (IsZero(rt)) {
2424 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2425 bgez(rs, offset32);
2426 } else {
2427 Slt(scratch, rs, rt);
2428 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2429 beq(scratch, zero_reg, offset32);
2430 }
2431 break;
2432 case less:
2433 if (IsZero(rt)) {
2434 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2435 bltz(rs, offset32);
2436 } else {
2437 Slt(scratch, rs, rt);
2438 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2439 bne(scratch, zero_reg, offset32);
2440 }
2441 break;
2442 case less_equal:
2443 if (IsZero(rt)) {
2444 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2445 blez(rs, offset32);
2446 } else {
2447 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2448 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2449 beq(scratch, zero_reg, offset32);
2450 }
2451 break;
2452
2453 // Unsigned comparison.
2454 case Ugreater:
2455 if (IsZero(rt)) {
2456 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2457 bne(rs, zero_reg, offset32);
2458 } else {
2459 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2460 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2461 bne(scratch, zero_reg, offset32);
2462 }
2463 break;
2464 case Ugreater_equal:
2465 if (IsZero(rt)) {
2466 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2467 b(offset32);
2468 } else {
2469 Sltu(scratch, rs, rt);
2470 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2471 beq(scratch, zero_reg, offset32);
2472 }
2473 break;
2474 case Uless:
2475 if (IsZero(rt)) {
2476 return true; // No code needs to be emitted.
2477 } else {
2478 Sltu(scratch, rs, rt);
2479 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2480 bne(scratch, zero_reg, offset32);
2481 }
2482 break;
2483 case Uless_equal:
2484 if (IsZero(rt)) {
2485 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2486 beq(rs, zero_reg, offset32);
2487 } else {
2488 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2489 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2490 beq(scratch, zero_reg, offset32);
2491 }
2492 break;
2493 default:
2494 UNREACHABLE();
2495 }
2496 }
2497 // Emit a nop in the branch delay slot if required.
2498 if (bdslot == PROTECT)
2499 nop();
2500
2501 return true;
2502 }
2503
2504
2505 bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
2506 Register rs, const Operand& rt,
2507 BranchDelaySlot bdslot) {
2508 BRANCH_ARGS_CHECK(cond, rs, rt);
2509
2510 if (!L) {
2511 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2512 DCHECK(is_int26(offset));
2513 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
2514 } else {
2515 DCHECK(is_int16(offset));
2516 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
2517 }
2518 } else {
2519 DCHECK(offset == 0);
2520 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2521 return BranchShortHelperR6(0, L, cond, rs, rt);
2522 } else {
2523 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
2524 }
2525 }
2526 return false;
2527 }
2528
2529
2530 void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
2531 const Operand& rt, BranchDelaySlot bdslot) {
2532 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2533 }
2534
2535
2536 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2537 const Operand& rt, BranchDelaySlot bdslot) {
2538 BranchShortCheck(0, L, cond, rs, rt, bdslot);
2539 }
2540
2541
2542 void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
2543 BranchAndLinkShort(offset, bdslot);
2544 }
2545
2546
2547 void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
2548 const Operand& rt, BranchDelaySlot bdslot) {
2549 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2550 DCHECK(is_near);
2551 USE(is_near);
2552 }
2553
2554
2555 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2556 if (L->is_bound()) {
2557 if (is_near_branch(L)) {
2558 BranchAndLinkShort(L, bdslot);
2559 } else {
2560 BranchAndLinkLong(L, bdslot);
2561 }
2562 } else {
2563 if (is_trampoline_emitted()) {
2564 BranchAndLinkLong(L, bdslot);
2565 } else {
2566 BranchAndLinkShort(L, bdslot);
2567 }
2568 }
2569 }
2570
2571
2572 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2573 const Operand& rt,
2574 BranchDelaySlot bdslot) {
2575 if (L->is_bound()) {
2576 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
2577 Label skip;
2578 Condition neg_cond = NegateCondition(cond);
2579 BranchShort(&skip, neg_cond, rs, rt);
2580 BranchAndLinkLong(L, bdslot);
2581 bind(&skip);
2582 }
2583 } else {
2584 if (is_trampoline_emitted()) {
2585 Label skip;
2586 Condition neg_cond = NegateCondition(cond);
2587 BranchShort(&skip, neg_cond, rs, rt);
2588 BranchAndLinkLong(L, bdslot);
2589 bind(&skip);
2590 } else {
2591 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
2592 }
2593 }
2594 }
2595
2596
2597 void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
2598 BranchDelaySlot bdslot) {
2599 DCHECK(L == nullptr || offset == 0);
2600 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2601 bal(offset);
2602
2603 // Emit a nop in the branch delay slot if required.
2604 if (bdslot == PROTECT)
2605 nop();
2606 }
2607
2608
2609 void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
2610 DCHECK(L == nullptr || offset == 0);
2611 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2612 balc(offset);
2613 }
2614
2615
2616 void MacroAssembler::BranchAndLinkShort(int32_t offset,
2617 BranchDelaySlot bdslot) {
2618 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2619 DCHECK(is_int26(offset));
2620 BranchAndLinkShortHelperR6(offset, nullptr);
2621 } else {
2622 DCHECK(is_int16(offset));
2623 BranchAndLinkShortHelper(offset, nullptr, bdslot);
2624 }
2625 }
2626
2627
2628 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2629 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2630 BranchAndLinkShortHelperR6(0, L);
2631 } else {
2632 BranchAndLinkShortHelper(0, L, bdslot);
2633 }
2634 }
2635
2636
2637 bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
2638 Condition cond, Register rs,
2639 const Operand& rt) {
2640 DCHECK(L == nullptr || offset == 0);
2641 Register scratch = rs.is(at) ? t8 : at;
2642 OffsetSize bits = OffsetSize::kOffset16;
2643
2644 BlockTrampolinePoolScope block_trampoline_pool(this);
2645 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
2646 switch (cond) {
2647 case cc_always:
2648 bits = OffsetSize::kOffset26;
2649 if (!is_near(L, bits)) return false;
2650 offset = GetOffset(offset, L, bits);
2651 balc(offset);
2652 break;
2653 case eq:
2654 if (!is_near(L, bits)) return false;
2655 Subu(scratch, rs, rt);
2656 offset = GetOffset(offset, L, bits);
2657 beqzalc(scratch, offset);
2658 break;
2659 case ne:
2660 if (!is_near(L, bits)) return false;
2661 Subu(scratch, rs, rt);
2662 offset = GetOffset(offset, L, bits);
2663 bnezalc(scratch, offset);
2664 break;
2665
2666 // Signed comparison.
2667 case greater:
2668 // rs > rt
2669 if (rs.code() == rt.rm_.reg_code) {
2670 break; // No code needs to be emitted.
2671 } else if (rs.is(zero_reg)) {
2672 if (!is_near(L, bits)) return false;
2673 scratch = GetRtAsRegisterHelper(rt, scratch);
2674 offset = GetOffset(offset, L, bits);
2675 bltzalc(scratch, offset);
2676 } else if (IsZero(rt)) {
2677 if (!is_near(L, bits)) return false;
2678 offset = GetOffset(offset, L, bits);
2679 bgtzalc(rs, offset);
2680 } else {
2681 if (!is_near(L, bits)) return false;
2682 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2683 offset = GetOffset(offset, L, bits);
2684 bnezalc(scratch, offset);
2685 }
2686 break;
2687 case greater_equal:
2688 // rs >= rt
2689 if (rs.code() == rt.rm_.reg_code) {
2690 bits = OffsetSize::kOffset26;
2691 if (!is_near(L, bits)) return false;
2692 offset = GetOffset(offset, L, bits);
2693 balc(offset);
2694 } else if (rs.is(zero_reg)) {
2695 if (!is_near(L, bits)) return false;
2696 scratch = GetRtAsRegisterHelper(rt, scratch);
2697 offset = GetOffset(offset, L, bits);
2698 blezalc(scratch, offset);
2699 } else if (IsZero(rt)) {
2700 if (!is_near(L, bits)) return false;
2701 offset = GetOffset(offset, L, bits);
2702 bgezalc(rs, offset);
2703 } else {
2704 if (!is_near(L, bits)) return false;
2705 Slt(scratch, rs, rt);
2706 offset = GetOffset(offset, L, bits);
2707 beqzalc(scratch, offset);
2708 }
2709 break;
2710 case less:
2711 // rs < rt
2712 if (rs.code() == rt.rm_.reg_code) {
2713 break; // No code needs to be emitted.
2714 } else if (rs.is(zero_reg)) {
2715 if (!is_near(L, bits)) return false;
2716 scratch = GetRtAsRegisterHelper(rt, scratch);
2717 offset = GetOffset(offset, L, bits);
2718 bgtzalc(scratch, offset);
2719 } else if (IsZero(rt)) {
2720 if (!is_near(L, bits)) return false;
2721 offset = GetOffset(offset, L, bits);
2722 bltzalc(rs, offset);
2723 } else {
2724 if (!is_near(L, bits)) return false;
2725 Slt(scratch, rs, rt);
2726 offset = GetOffset(offset, L, bits);
2727 bnezalc(scratch, offset);
2728 }
2729 break;
2730 case less_equal:
2731 // rs <= rt
2732 if (rs.code() == rt.rm_.reg_code) {
2733 bits = OffsetSize::kOffset26;
2734 if (!is_near(L, bits)) return false;
2735 offset = GetOffset(offset, L, bits);
2736 balc(offset);
2737 } else if (rs.is(zero_reg)) {
2738 if (!is_near(L, bits)) return false;
2739 scratch = GetRtAsRegisterHelper(rt, scratch);
2740 offset = GetOffset(offset, L, bits);
2741 bgezalc(scratch, offset);
2742 } else if (IsZero(rt)) {
2743 if (!is_near(L, bits)) return false;
2744 offset = GetOffset(offset, L, bits);
2745 blezalc(rs, offset);
2746 } else {
2747 if (!is_near(L, bits)) return false;
2748 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2749 offset = GetOffset(offset, L, bits);
2750 beqzalc(scratch, offset);
2751 }
2752 break;
2753
2754
2755 // Unsigned comparison.
2756 case Ugreater:
2757 // rs > rt
2758 if (!is_near(L, bits)) return false;
2759 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2760 offset = GetOffset(offset, L, bits);
2761 bnezalc(scratch, offset);
2762 break;
2763 case Ugreater_equal:
2764 // rs >= rt
2765 if (!is_near(L, bits)) return false;
2766 Sltu(scratch, rs, rt);
2767 offset = GetOffset(offset, L, bits);
2768 beqzalc(scratch, offset);
2769 break;
2770 case Uless:
2771 // rs < rt
2772 if (!is_near(L, bits)) return false;
2773 Sltu(scratch, rs, rt);
2774 offset = GetOffset(offset, L, bits);
2775 bnezalc(scratch, offset);
2776 break;
2777 case Uless_equal:
2778 // rs <= rt
2779 if (!is_near(L, bits)) return false;
2780 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2781 offset = GetOffset(offset, L, bits);
2782 beqzalc(scratch, offset);
2783 break;
2784 default:
2785 UNREACHABLE();
2786 }
2787 return true;
2788 }
2789
2790
2791 // Pre-r6 we need to use bgezal or bltzal, but they cannot consume the result
2792 // of slt directly. We could use sub or add instead, but that would miss
2793 // overflow cases, so we keep slt and add an intermediate third instruction.
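// Concretely, Slt leaves 1 in scratch when its comparison holds and 0 when it
// does not; addiu(scratch, scratch, -1) maps that to 0 or -1, so bgezal
// (branch if >= 0) links and branches exactly when the slt produced 1, and
// bltzal exactly when it produced 0.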
2794 bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
2795 Condition cond, Register rs,
2796 const Operand& rt,
2797 BranchDelaySlot bdslot) {
2798 DCHECK(L == nullptr || offset == 0);
2799 if (!is_near(L, OffsetSize::kOffset16)) return false;
2800
2801 Register scratch = t8;
2802 BlockTrampolinePoolScope block_trampoline_pool(this);
2803
2804 switch (cond) {
2805 case cc_always:
2806 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2807 bal(offset);
2808 break;
2809 case eq:
2810 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
2811 nop();
2812 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2813 bal(offset);
2814 break;
2815 case ne:
2816 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
2817 nop();
2818 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2819 bal(offset);
2820 break;
2821
2822 // Signed comparison.
2823 case greater:
2824 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2825 addiu(scratch, scratch, -1);
2826 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2827 bgezal(scratch, offset);
2828 break;
2829 case greater_equal:
2830 Slt(scratch, rs, rt);
2831 addiu(scratch, scratch, -1);
2832 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2833 bltzal(scratch, offset);
2834 break;
2835 case less:
2836 Slt(scratch, rs, rt);
2837 addiu(scratch, scratch, -1);
2838 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2839 bgezal(scratch, offset);
2840 break;
2841 case less_equal:
2842 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2843 addiu(scratch, scratch, -1);
2844 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2845 bltzal(scratch, offset);
2846 break;
2847
2848 // Unsigned comparison.
2849 case Ugreater:
2850 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2851 addiu(scratch, scratch, -1);
2852 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2853 bgezal(scratch, offset);
2854 break;
2855 case Ugreater_equal:
2856 Sltu(scratch, rs, rt);
2857 addiu(scratch, scratch, -1);
2858 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2859 bltzal(scratch, offset);
2860 break;
2861 case Uless:
2862 Sltu(scratch, rs, rt);
2863 addiu(scratch, scratch, -1);
2864 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2865 bgezal(scratch, offset);
2866 break;
2867 case Uless_equal:
2868 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2869 addiu(scratch, scratch, -1);
2870 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2871 bltzal(scratch, offset);
2872 break;
2873
2874 default:
2875 UNREACHABLE();
2876 }
2877
2878 // Emit a nop in the branch delay slot if required.
2879 if (bdslot == PROTECT)
2880 nop();
2881
2882 return true;
2883 }
2884
2885
2886 bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
2887 Condition cond, Register rs,
2888 const Operand& rt,
2889 BranchDelaySlot bdslot) {
2890 BRANCH_ARGS_CHECK(cond, rs, rt);
2891
2892 if (!L) {
2893 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2894 DCHECK(is_int26(offset));
2895 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
2896 } else {
2897 DCHECK(is_int16(offset));
2898 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
2899 }
2900 } else {
2901 DCHECK(offset == 0);
2902 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2903 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
2904 } else {
2905 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
2906 }
2907 }
2908 return false;
2909 }
2910
2911
2912 void MacroAssembler::Jump(Register target,
2913 Condition cond,
2914 Register rs,
2915 const Operand& rt,
2916 BranchDelaySlot bd) {
2917 BlockTrampolinePoolScope block_trampoline_pool(this);
2918 if (cond == cc_always) {
2919 jr(target);
2920 } else {
2921 BRANCH_ARGS_CHECK(cond, rs, rt);
2922 Branch(2, NegateCondition(cond), rs, rt);
2923 jr(target);
2924 }
2925 // Emit a nop in the branch delay slot if required.
2926 if (bd == PROTECT)
2927 nop();
2928 }
2929
2930
2931 void MacroAssembler::Jump(intptr_t target,
2932 RelocInfo::Mode rmode,
2933 Condition cond,
2934 Register rs,
2935 const Operand& rt,
2936 BranchDelaySlot bd) {
2937 Label skip;
2938 if (cond != cc_always) {
2939 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2940 }
2941 // The first instruction of 'li' may be placed in the delay slot.
2942 // This is not an issue because t9 is expected to be clobbered anyway.
2943 li(t9, Operand(target, rmode));
2944 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2945 bind(&skip);
2946 }
2947
2948
2949 void MacroAssembler::Jump(Address target,
2950 RelocInfo::Mode rmode,
2951 Condition cond,
2952 Register rs,
2953 const Operand& rt,
2954 BranchDelaySlot bd) {
2955 DCHECK(!RelocInfo::IsCodeTarget(rmode));
2956 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2957 }
2958
2959
2960 void MacroAssembler::Jump(Handle<Code> code,
2961 RelocInfo::Mode rmode,
2962 Condition cond,
2963 Register rs,
2964 const Operand& rt,
2965 BranchDelaySlot bd) {
2966 DCHECK(RelocInfo::IsCodeTarget(rmode));
2967 AllowDeferredHandleDereference embedding_raw_address;
2968 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2969 }
2970
2971
2972 int MacroAssembler::CallSize(Register target,
2973 Condition cond,
2974 Register rs,
2975 const Operand& rt,
2976 BranchDelaySlot bd) {
2977 int size = 0;
2978
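// The counts below mirror what Call(Register, ...) emits: a lone jalr for an
// unconditional call, or a guarding branch sequence followed by jalr
// otherwise, plus the optional delay-slot nop accounted for underneath.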
2979 if (cond == cc_always) {
2980 size += 1;
2981 } else {
2982 size += 3;
2983 }
2984
2985 if (bd == PROTECT)
2986 size += 1;
2987
2988 return size * kInstrSize;
2989 }
2990
2991
2992 // Note: To call gcc-compiled C code on mips, you must call thru t9.
2993 void MacroAssembler::Call(Register target,
2994 Condition cond,
2995 Register rs,
2996 const Operand& rt,
2997 BranchDelaySlot bd) {
2998 #ifdef DEBUG
2999 int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3000 #endif
3001
3002 BlockTrampolinePoolScope block_trampoline_pool(this);
3003 Label start;
3004 bind(&start);
3005 if (cond == cc_always) {
3006 jalr(target);
3007 } else {
3008 BRANCH_ARGS_CHECK(cond, rs, rt);
3009 Branch(2, NegateCondition(cond), rs, rt);
3010 jalr(target);
3011 }
3012 // Emit a nop in the branch delay slot if required.
3013 if (bd == PROTECT)
3014 nop();
3015
3016 #ifdef DEBUG
3017 CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3018 SizeOfCodeGeneratedSince(&start));
3019 #endif
3020 }
3021
3022
3023 int MacroAssembler::CallSize(Address target,
3024 RelocInfo::Mode rmode,
3025 Condition cond,
3026 Register rs,
3027 const Operand& rt,
3028 BranchDelaySlot bd) {
3029 int size = CallSize(t9, cond, rs, rt, bd);
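// The two extra instructions account for the lui/ori pair that
// li(..., CONSTANT_SIZE) is expected to emit when materializing the call
// target in t9.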
3030 return size + 2 * kInstrSize;
3031 }
3032
3033
3034 void MacroAssembler::Call(Address target,
3035 RelocInfo::Mode rmode,
3036 Condition cond,
3037 Register rs,
3038 const Operand& rt,
3039 BranchDelaySlot bd) {
3040 BlockTrampolinePoolScope block_trampoline_pool(this);
3041 Label start;
3042 bind(&start);
3043 int32_t target_int = reinterpret_cast<int32_t>(target);
3044 // Must record previous source positions before the
3045 // li() generates a new code target.
3046 positions_recorder()->WriteRecordedPositions();
3047 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3048 Call(t9, cond, rs, rt, bd);
3049 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3050 SizeOfCodeGeneratedSince(&start));
3051 }
3052
3053
3054 int MacroAssembler::CallSize(Handle<Code> code,
3055 RelocInfo::Mode rmode,
3056 TypeFeedbackId ast_id,
3057 Condition cond,
3058 Register rs,
3059 const Operand& rt,
3060 BranchDelaySlot bd) {
3061 AllowDeferredHandleDereference using_raw_address;
3062 return CallSize(reinterpret_cast<Address>(code.location()),
3063 rmode, cond, rs, rt, bd);
3064 }
3065
3066
3067 void MacroAssembler::Call(Handle<Code> code,
3068 RelocInfo::Mode rmode,
3069 TypeFeedbackId ast_id,
3070 Condition cond,
3071 Register rs,
3072 const Operand& rt,
3073 BranchDelaySlot bd) {
3074 BlockTrampolinePoolScope block_trampoline_pool(this);
3075 Label start;
3076 bind(&start);
3077 DCHECK(RelocInfo::IsCodeTarget(rmode));
3078 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3079 SetRecordedAstId(ast_id);
3080 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3081 }
3082 AllowDeferredHandleDereference embedding_raw_address;
3083 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3084 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3085 SizeOfCodeGeneratedSince(&start));
3086 }
3087
3088
3089 void MacroAssembler::Ret(Condition cond,
3090 Register rs,
3091 const Operand& rt,
3092 BranchDelaySlot bd) {
3093 Jump(ra, cond, rs, rt, bd);
3094 }
3095
3096
3097 void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3098 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
3099 (!L->is_bound() || is_near_r6(L))) {
3100 BranchShortHelperR6(0, L);
3101 } else {
3102 BlockTrampolinePoolScope block_trampoline_pool(this);
3103 uint32_t imm32;
3104 imm32 = jump_address(L);
3105 {
3106 BlockGrowBufferScope block_buf_growth(this);
3107 // Buffer growth (and relocation) must be blocked for internal references
3108 // until associated instructions are emitted and available to be patched.
3109 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3110 lui(at, (imm32 & kHiMask) >> kLuiShift);
3111 ori(at, at, (imm32 & kImm16Mask));
3112 }
3113 jr(at);
3114
3115 // Emit a nop in the branch delay slot if required.
3116 if (bdslot == PROTECT) nop();
3117 }
3118 }
3119
3120
3121 void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
3122 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
3123 (!L->is_bound() || is_near_r6(L))) {
3124 BranchAndLinkShortHelperR6(0, L);
3125 } else {
3126 BlockTrampolinePoolScope block_trampoline_pool(this);
3127 uint32_t imm32;
3128 imm32 = jump_address(L);
3129 {
3130 BlockGrowBufferScope block_buf_growth(this);
3131 // Buffer growth (and relocation) must be blocked for internal references
3132 // until associated instructions are emitted and available to be patched.
3133 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3134 lui(at, (imm32 & kHiMask) >> kLuiShift);
3135 ori(at, at, (imm32 & kImm16Mask));
3136 }
3137 jalr(at);
3138
3139 // Emit a nop in the branch delay slot if required.
3140 if (bdslot == PROTECT) nop();
3141 }
3142 }
3143
3144
3145 void MacroAssembler::DropAndRet(int drop) {
3146 DCHECK(is_int16(drop * kPointerSize));
3147 Ret(USE_DELAY_SLOT);
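// The addiu below lands in the delay slot of the return above, so the stack
// is adjusted before control reaches the caller.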
3148 addiu(sp, sp, drop * kPointerSize);
3149 }
3150
3151 void MacroAssembler::DropAndRet(int drop,
3152 Condition cond,
3153 Register r1,
3154 const Operand& r2) {
3155 // Both Drop and Ret need to be conditional.
3156 Label skip;
3157 if (cond != cc_always) {
3158 Branch(&skip, NegateCondition(cond), r1, r2);
3159 }
3160
3161 Drop(drop);
3162 Ret();
3163
3164 if (cond != cc_always) {
3165 bind(&skip);
3166 }
3167 }
3168
3169
3170 void MacroAssembler::Drop(int count,
3171 Condition cond,
3172 Register reg,
3173 const Operand& op) {
3174 if (count <= 0) {
3175 return;
3176 }
3177
3178 Label skip;
3179
3180 if (cond != al) {
3181 Branch(&skip, NegateCondition(cond), reg, op);
3182 }
3183
3184 Addu(sp, sp, Operand(count * kPointerSize));
3185
3186 if (cond != al) {
3187 bind(&skip);
3188 }
3189 }
3190
3191
3192
3193 void MacroAssembler::Swap(Register reg1,
3194 Register reg2,
3195 Register scratch) {
3196 if (scratch.is(no_reg)) {
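// No scratch register was supplied, so swap in place with the classic
// three-XOR trick instead of going through a temporary.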
3197 Xor(reg1, reg1, Operand(reg2));
3198 Xor(reg2, reg2, Operand(reg1));
3199 Xor(reg1, reg1, Operand(reg2));
3200 } else {
3201 mov(scratch, reg1);
3202 mov(reg1, reg2);
3203 mov(reg2, scratch);
3204 }
3205 }
3206
3207
3208 void MacroAssembler::Call(Label* target) {
3209 BranchAndLink(target);
3210 }
3211
3212
3213 void MacroAssembler::Push(Handle<Object> handle) {
3214 li(at, Operand(handle));
3215 push(at);
3216 }
3217
3218
3219 void MacroAssembler::DebugBreak() {
3220 PrepareCEntryArgs(0);
3221 PrepareCEntryFunction(
3222 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
3223 CEntryStub ces(isolate(), 1);
3224 DCHECK(AllowThisStubCall(&ces));
3225 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
3226 }
3227
3228
3229 // ---------------------------------------------------------------------------
3230 // Exception handling.
3231
3232 void MacroAssembler::PushStackHandler() {
3233 // Adjust this code if not the case.
3234 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3235 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3236
3237 // Link the current handler as the next handler.
3238 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3239 lw(t1, MemOperand(t2));
3240 push(t1);
3241
3242 // Set this new handler as the current one.
3243 sw(sp, MemOperand(t2));
3244 }
3245
3246
3247 void MacroAssembler::PopStackHandler() {
3248 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3249 pop(a1);
3250 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3251 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3252 sw(a1, MemOperand(at));
3253 }
3254
3255
3256 void MacroAssembler::Allocate(int object_size,
3257 Register result,
3258 Register scratch1,
3259 Register scratch2,
3260 Label* gc_required,
3261 AllocationFlags flags) {
3262 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3263 if (!FLAG_inline_new) {
3264 if (emit_debug_code()) {
3265 // Trash the registers to simulate an allocation failure.
3266 li(result, 0x7091);
3267 li(scratch1, 0x7191);
3268 li(scratch2, 0x7291);
3269 }
3270 jmp(gc_required);
3271 return;
3272 }
3273
3274 DCHECK(!AreAliased(result, scratch1, scratch2, t9));
3275
3276 // Make object size into bytes.
3277 if ((flags & SIZE_IN_WORDS) != 0) {
3278 object_size *= kPointerSize;
3279 }
3280 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
3281
3282 // Check relative positions of allocation top and limit addresses.
3283 // ARM adds additional checks to make sure the ldm instruction can be
3284 // used. On MIPS we don't have ldm so we don't need additional checks either.
3285 ExternalReference allocation_top =
3286 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3287 ExternalReference allocation_limit =
3288 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3289
3290 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
3291 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
3292 DCHECK((limit - top) == kPointerSize);
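// The limit word lives one pointer after the top word, so both can be
// addressed from the single base register loaded below.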
3293
3294 // Set up allocation top address and allocation limit registers.
3295 Register top_address = scratch1;
3296 // This code stores a temporary value in t9.
3297 Register alloc_limit = t9;
3298 Register result_end = scratch2;
3299 li(top_address, Operand(allocation_top));
3300
3301 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3302 // Load allocation top into result and allocation limit into alloc_limit.
3303 lw(result, MemOperand(top_address));
3304 lw(alloc_limit, MemOperand(top_address, kPointerSize));
3305 } else {
3306 if (emit_debug_code()) {
3307 // Assert that result actually contains top on entry.
3308 lw(alloc_limit, MemOperand(top_address));
3309 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
3310 }
3311 // Load allocation limit. Result already contains allocation top.
3312 lw(alloc_limit, MemOperand(top_address, limit - top));
3313 }
3314
3315 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3316 // Align the next allocation. Storing the filler map without checking top is
3317 // safe in new-space because the limit of the heap is aligned there.
3318 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
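// If the current top is only word-aligned, store a one-word filler object at
// |result| and advance the allocation start by one word so the object itself
// begins on a double-aligned address.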
3319 And(result_end, result, Operand(kDoubleAlignmentMask));
3320 Label aligned;
3321 Branch(&aligned, eq, result_end, Operand(zero_reg));
3322 if ((flags & PRETENURE) != 0) {
3323 Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
3324 }
3325 li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
3326 sw(result_end, MemOperand(result));
3327 Addu(result, result, Operand(kDoubleSize / 2));
3328 bind(&aligned);
3329 }
3330
3331 // Calculate new top and bail out if new space is exhausted. Use result
3332 // to calculate the new top.
3333 Addu(result_end, result, Operand(object_size));
3334 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
3335 sw(result_end, MemOperand(top_address));
3336
3337 // Tag object if requested.
3338 if ((flags & TAG_OBJECT) != 0) {
3339 Addu(result, result, Operand(kHeapObjectTag));
3340 }
3341 }
3342
3343
3344 void MacroAssembler::Allocate(Register object_size, Register result,
3345 Register result_end, Register scratch,
3346 Label* gc_required, AllocationFlags flags) {
3347 if (!FLAG_inline_new) {
3348 if (emit_debug_code()) {
3349 // Trash the registers to simulate an allocation failure.
3350 li(result, 0x7091);
3351 li(scratch, 0x7191);
3352 li(result_end, 0x7291);
3353 }
3354 jmp(gc_required);
3355 return;
3356 }
3357
3358 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
3359 // is not specified. Other registers must not overlap.
3360 DCHECK(!AreAliased(object_size, result, scratch, t9));
3361 DCHECK(!AreAliased(result_end, result, scratch, t9));
3362 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
3363
3364 // Check relative positions of allocation top and limit addresses.
3365 // ARM adds additional checks to make sure the ldm instruction can be
3366 // used. On MIPS we don't have ldm so we don't need additional checks either.
3367 ExternalReference allocation_top =
3368 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3369 ExternalReference allocation_limit =
3370 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3371 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
3372 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
3373 DCHECK((limit - top) == kPointerSize);
3374
3375 // Set up allocation top address and allocation limit registers.
3376 Register top_address = scratch;
3377 // This code stores a temporary value in t9.
3378 Register alloc_limit = t9;
3379 li(top_address, Operand(allocation_top));
3380
3381 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3382 // Load allocation top into result and allocation limit into alloc_limit.
3383 lw(result, MemOperand(top_address));
3384 lw(alloc_limit, MemOperand(top_address, kPointerSize));
3385 } else {
3386 if (emit_debug_code()) {
3387 // Assert that result actually contains top on entry.
3388 lw(alloc_limit, MemOperand(top_address));
3389 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
3390 }
3391 // Load allocation limit. Result already contains allocation top.
3392 lw(alloc_limit, MemOperand(top_address, limit - top));
3393 }
3394
3395 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3396 // Align the next allocation. Storing the filler map without checking top is
3397 // safe in new-space because the limit of the heap is aligned there.
3398 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3399 And(result_end, result, Operand(kDoubleAlignmentMask));
3400 Label aligned;
3401 Branch(&aligned, eq, result_end, Operand(zero_reg));
3402 if ((flags & PRETENURE) != 0) {
3403 Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
3404 }
3405 li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
3406 sw(result_end, MemOperand(result));
3407 Addu(result, result, Operand(kDoubleSize / 2));
3408 bind(&aligned);
3409 }
3410
3411 // Calculate new top and bail out if new space is exhausted. Use result
3412 // to calculate the new top. Object size may be in words so a shift is
3413 // required to get the number of bytes.
3414 if ((flags & SIZE_IN_WORDS) != 0) {
3415 sll(result_end, object_size, kPointerSizeLog2);
3416 Addu(result_end, result, result_end);
3417 } else {
3418 Addu(result_end, result, Operand(object_size));
3419 }
3420 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
3421
3422 // Update allocation top. result temporarily holds the new top.
3423 if (emit_debug_code()) {
3424 And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
3425 Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
3426 }
3427 sw(result_end, MemOperand(top_address));
3428
3429 // Tag object if requested.
3430 if ((flags & TAG_OBJECT) != 0) {
3431 Addu(result, result, Operand(kHeapObjectTag));
3432 }
3433 }
3434
3435
3436 void MacroAssembler::AllocateTwoByteString(Register result,
3437 Register length,
3438 Register scratch1,
3439 Register scratch2,
3440 Register scratch3,
3441 Label* gc_required) {
3442 // Calculate the number of bytes needed for the characters in the string
3443 // while observing object alignment.
3444 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
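// The size is 2 * length plus the header, rounded up to the next
// object-aligned value by adding kObjectAlignmentMask and clearing the low
// bits.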
3445 sll(scratch1, length, 1); // Length in bytes, not chars.
3446 addiu(scratch1, scratch1,
3447 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3448 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3449
3450 // Allocate two-byte string in new space.
3451 Allocate(scratch1,
3452 result,
3453 scratch2,
3454 scratch3,
3455 gc_required,
3456 TAG_OBJECT);
3457
3458 // Set the map, length and hash field.
3459 InitializeNewString(result,
3460 length,
3461 Heap::kStringMapRootIndex,
3462 scratch1,
3463 scratch2);
3464 }
3465
3466
3467 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3468 Register scratch1, Register scratch2,
3469 Register scratch3,
3470 Label* gc_required) {
3471 // Calculate the number of bytes needed for the characters in the string
3472 // while observing object alignment.
3473 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3474 DCHECK(kCharSize == 1);
3475 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3476 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3477
3478 // Allocate one-byte string in new space.
3479 Allocate(scratch1,
3480 result,
3481 scratch2,
3482 scratch3,
3483 gc_required,
3484 TAG_OBJECT);
3485
3486 // Set the map, length and hash field.
3487 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3488 scratch1, scratch2);
3489 }
3490
3491
3492 void MacroAssembler::AllocateTwoByteConsString(Register result,
3493 Register length,
3494 Register scratch1,
3495 Register scratch2,
3496 Label* gc_required) {
3497 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3498 TAG_OBJECT);
3499 InitializeNewString(result,
3500 length,
3501 Heap::kConsStringMapRootIndex,
3502 scratch1,
3503 scratch2);
3504 }
3505
3506
3507 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3508 Register scratch1,
3509 Register scratch2,
3510 Label* gc_required) {
3511 Allocate(ConsString::kSize,
3512 result,
3513 scratch1,
3514 scratch2,
3515 gc_required,
3516 TAG_OBJECT);
3517
3518 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3519 scratch1, scratch2);
3520 }
3521
3522
3523 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3524 Register length,
3525 Register scratch1,
3526 Register scratch2,
3527 Label* gc_required) {
3528 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3529 TAG_OBJECT);
3530
3531 InitializeNewString(result,
3532 length,
3533 Heap::kSlicedStringMapRootIndex,
3534 scratch1,
3535 scratch2);
3536 }
3537
3538
3539 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3540 Register length,
3541 Register scratch1,
3542 Register scratch2,
3543 Label* gc_required) {
3544 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3545 TAG_OBJECT);
3546
3547 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3548 scratch1, scratch2);
3549 }
3550
3551
3552 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3553 Label* not_unique_name) {
3554 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3555 Label succeed;
3556 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3557 Branch(&succeed, eq, at, Operand(zero_reg));
3558 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3559
3560 bind(&succeed);
3561 }
3562
3563
3564 // Allocates a heap number or jumps to the label if the young space is full and
3565 // a scavenge is needed.
3566 void MacroAssembler::AllocateHeapNumber(Register result,
3567 Register scratch1,
3568 Register scratch2,
3569 Register heap_number_map,
3570 Label* need_gc,
3571 TaggingMode tagging_mode,
3572 MutableMode mode) {
3573 // Allocate an object in the heap for the heap number and tag it as a heap
3574 // object.
3575 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3576 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3577
3578 Heap::RootListIndex map_index = mode == MUTABLE
3579 ? Heap::kMutableHeapNumberMapRootIndex
3580 : Heap::kHeapNumberMapRootIndex;
3581 AssertIsRoot(heap_number_map, map_index);
3582
3583 // Store heap number map in the allocated object.
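// FieldMemOperand subtracts the heap-object tag, so it is only correct for a
// tagged result; the untagged case uses a plain MemOperand instead.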
3584 if (tagging_mode == TAG_RESULT) {
3585 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3586 } else {
3587 sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3588 }
3589 }
3590
3591
3592 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3593 FPURegister value,
3594 Register scratch1,
3595 Register scratch2,
3596 Label* gc_required) {
3597 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3598 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3599 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3600 }
3601
3602
3603 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3604 Register value, Register scratch1,
3605 Register scratch2, Label* gc_required) {
3606 DCHECK(!result.is(constructor));
3607 DCHECK(!result.is(scratch1));
3608 DCHECK(!result.is(scratch2));
3609 DCHECK(!result.is(value));
3610
3611 // Allocate JSValue in new space.
3612 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
3613
3614 // Initialize the JSValue.
3615 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3616 sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
3617 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3618 sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
3619 sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
3620 sw(value, FieldMemOperand(result, JSValue::kValueOffset));
3621 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3622 }
3623
3624
3625 void MacroAssembler::CopyBytes(Register src,
3626 Register dst,
3627 Register length,
3628 Register scratch) {
3629 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3630
3631 // Align src before copying in word size chunks.
3632 Branch(&byte_loop, le, length, Operand(kPointerSize));
3633 bind(&align_loop_1);
3634 And(scratch, src, kPointerSize - 1);
3635 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3636 lbu(scratch, MemOperand(src));
3637 Addu(src, src, 1);
3638 sb(scratch, MemOperand(dst));
3639 Addu(dst, dst, 1);
3640 Subu(length, length, Operand(1));
3641 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3642
3643 // Copy bytes in word size chunks.
3644 bind(&word_loop);
3645 if (emit_debug_code()) {
3646 And(scratch, src, kPointerSize - 1);
3647 Assert(eq, kExpectingAlignmentForCopyBytes,
3648 scratch, Operand(zero_reg));
3649 }
3650 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3651 lw(scratch, MemOperand(src));
3652 Addu(src, src, kPointerSize);
3653
3654 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3655 // Can't use unaligned access - copy byte by byte.
3656 if (kArchEndian == kLittle) {
3657 sb(scratch, MemOperand(dst, 0));
3658 srl(scratch, scratch, 8);
3659 sb(scratch, MemOperand(dst, 1));
3660 srl(scratch, scratch, 8);
3661 sb(scratch, MemOperand(dst, 2));
3662 srl(scratch, scratch, 8);
3663 sb(scratch, MemOperand(dst, 3));
3664 } else {
3665 sb(scratch, MemOperand(dst, 3));
3666 srl(scratch, scratch, 8);
3667 sb(scratch, MemOperand(dst, 2));
3668 srl(scratch, scratch, 8);
3669 sb(scratch, MemOperand(dst, 1));
3670 srl(scratch, scratch, 8);
3671 sb(scratch, MemOperand(dst, 0));
3672 }
3673
3674 Addu(dst, dst, 4);
3675
3676 Subu(length, length, Operand(kPointerSize));
3677 Branch(&word_loop);
3678
3679 // Copy the last bytes if any left.
3680 bind(&byte_loop);
3681 Branch(&done, eq, length, Operand(zero_reg));
3682 bind(&byte_loop_1);
3683 lbu(scratch, MemOperand(src));
3684 Addu(src, src, 1);
3685 sb(scratch, MemOperand(dst));
3686 Addu(dst, dst, 1);
3687 Subu(length, length, Operand(1));
3688 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3689 bind(&done);
3690 }
3691
3692
3693 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
3694 Register end_address,
3695 Register filler) {
3696 Label loop, entry;
3697 Branch(&entry);
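// Jump to the loop condition first so that nothing is stored when the range is empty.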
3698 bind(&loop);
3699 sw(filler, MemOperand(current_address));
3700 Addu(current_address, current_address, kPointerSize);
3701 bind(&entry);
3702 Branch(&loop, ult, current_address, Operand(end_address));
3703 }
3704
3705
3706 void MacroAssembler::CheckFastElements(Register map,
3707 Register scratch,
3708 Label* fail) {
3709 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3710 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3711 STATIC_ASSERT(FAST_ELEMENTS == 2);
3712 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3713 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3714 Branch(fail, hi, scratch,
3715 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3716 }
3717
3718
3719 void MacroAssembler::CheckFastObjectElements(Register map,
3720 Register scratch,
3721 Label* fail) {
3722 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3723 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3724 STATIC_ASSERT(FAST_ELEMENTS == 2);
3725 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3726 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3727 Branch(fail, ls, scratch,
3728 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3729 Branch(fail, hi, scratch,
3730 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3731 }
3732
3733
3734 void MacroAssembler::CheckFastSmiElements(Register map,
3735 Register scratch,
3736 Label* fail) {
3737 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3738 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3739 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3740 Branch(fail, hi, scratch,
3741 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3742 }
3743
3744
3745 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3746 Register key_reg,
3747 Register elements_reg,
3748 Register scratch1,
3749 Register scratch2,
3750 Register scratch3,
3751 Label* fail,
3752 int elements_offset) {
3753 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
3754 scratch3));
3755 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3756 Register mantissa_reg = scratch2;
3757 Register exponent_reg = scratch3;
3758
3759 // Handle smi values specially.
3760 JumpIfSmi(value_reg, &smi_value);
3761
3762 // Ensure that the object is a heap number
3763 CheckMap(value_reg,
3764 scratch1,
3765 Heap::kHeapNumberMapRootIndex,
3766 fail,
3767 DONT_DO_SMI_CHECK);
3768
3769 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
3770 // in the exponent.
3771 li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
3772 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3773 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3774
3775 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3776
3777 bind(&have_double_value);
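// key_reg holds a smi (key << kSmiTagSize); shifting by
// (kDoubleSizeLog2 - kSmiTagSize) yields key * kDoubleSize, the byte offset of
// the element within the array.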
3778 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3779 Addu(scratch1, scratch1, elements_reg);
3780 sw(mantissa_reg,
3781 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3782 + kHoleNanLower32Offset));
3783 sw(exponent_reg,
3784 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3785 + kHoleNanUpper32Offset));
3786 jmp(&done);
3787
3788 bind(&maybe_nan);
3789 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3790 // it's an Infinity, and the non-NaN code path applies.
3791 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3792 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3793 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3794 bind(&is_nan);
3795 // Load canonical NaN for storing into the double array.
3796 LoadRoot(at, Heap::kNanValueRootIndex);
3797 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3798 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3799 jmp(&have_double_value);
3800
3801 bind(&smi_value);
3802 Addu(scratch1, elements_reg,
3803 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3804 elements_offset));
3805 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3806 Addu(scratch1, scratch1, scratch2);
3807 // scratch1 is now the effective address of the double element.
3808
3809 Register untagged_value = scratch2;
3810 SmiUntag(untagged_value, value_reg);
3811 mtc1(untagged_value, f2);
3812 cvt_d_w(f0, f2);
3813 sdc1(f0, MemOperand(scratch1, 0));
3814 bind(&done);
3815 }
3816
3817
3818 void MacroAssembler::CompareMapAndBranch(Register obj,
3819 Register scratch,
3820 Handle<Map> map,
3821 Label* early_success,
3822 Condition cond,
3823 Label* branch_to) {
3824 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3825 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3826 }
3827
3828
3829 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3830 Handle<Map> map,
3831 Label* early_success,
3832 Condition cond,
3833 Label* branch_to) {
3834 Branch(branch_to, cond, obj_map, Operand(map));
3835 }
3836
3837
3838 void MacroAssembler::CheckMap(Register obj,
3839 Register scratch,
3840 Handle<Map> map,
3841 Label* fail,
3842 SmiCheckType smi_check_type) {
3843 if (smi_check_type == DO_SMI_CHECK) {
3844 JumpIfSmi(obj, fail);
3845 }
3846 Label success;
3847 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3848 bind(&success);
3849 }
3850
3851
3852 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3853 Register scratch2, Handle<WeakCell> cell,
3854 Handle<Code> success,
3855 SmiCheckType smi_check_type) {
3856 Label fail;
3857 if (smi_check_type == DO_SMI_CHECK) {
3858 JumpIfSmi(obj, &fail);
3859 }
3860 lw(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3861 GetWeakValue(scratch2, cell);
3862 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
3863 bind(&fail);
3864 }
3865
3866
3867 void MacroAssembler::CheckMap(Register obj,
3868 Register scratch,
3869 Heap::RootListIndex index,
3870 Label* fail,
3871 SmiCheckType smi_check_type) {
3872 if (smi_check_type == DO_SMI_CHECK) {
3873 JumpIfSmi(obj, fail);
3874 }
3875 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3876 LoadRoot(at, index);
3877 Branch(fail, ne, scratch, Operand(at));
3878 }
3879
3880
3881 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3882 li(value, Operand(cell));
3883 lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
3884 }
3885
3886
3887 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3888 Label* miss) {
3889 GetWeakValue(value, cell);
3890 JumpIfSmi(value, miss);
3891 }
3892
3893
3894 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
3895 if (IsMipsSoftFloatABI) {
3896 if (kArchEndian == kLittle) {
3897 Move(dst, v0, v1);
3898 } else {
3899 Move(dst, v1, v0);
3900 }
3901 } else {
3902 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3903 }
3904 }
3905
3906
3907 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
3908 if (IsMipsSoftFloatABI) {
3909 if (kArchEndian == kLittle) {
3910 Move(dst, a0, a1);
3911 } else {
3912 Move(dst, a1, a0);
3913 }
3914 } else {
3915 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
3916 }
3917 }
3918
3919
3920 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3921 if (!IsMipsSoftFloatABI) {
3922 Move(f12, src);
3923 } else {
3924 if (kArchEndian == kLittle) {
3925 Move(a0, a1, src);
3926 } else {
3927 Move(a1, a0, src);
3928 }
3929 }
3930 }
3931
3932
3933 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3934 if (!IsMipsSoftFloatABI) {
3935 Move(f0, src);
3936 } else {
3937 if (kArchEndian == kLittle) {
3938 Move(v0, v1, src);
3939 } else {
3940 Move(v1, v0, src);
3941 }
3942 }
3943 }
3944
3945
3946 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3947 DoubleRegister src2) {
3948 if (!IsMipsSoftFloatABI) {
3949 if (src2.is(f12)) {
3950 DCHECK(!src1.is(f14));
3951 Move(f14, src2);
3952 Move(f12, src1);
3953 } else {
3954 Move(f12, src1);
3955 Move(f14, src2);
3956 }
3957 } else {
3958 if (kArchEndian == kLittle) {
3959 Move(a0, a1, src1);
3960 Move(a2, a3, src2);
3961 } else {
3962 Move(a1, a0, src1);
3963 Move(a3, a2, src2);
3964 }
3965 }
3966 }
3967
3968
3969 // -----------------------------------------------------------------------------
3970 // JavaScript invokes.
3971
3972 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3973 const ParameterCount& actual,
3974 Label* done,
3975 bool* definitely_mismatches,
3976 InvokeFlag flag,
3977 const CallWrapper& call_wrapper) {
3978 bool definitely_matches = false;
3979 *definitely_mismatches = false;
3980 Label regular_invoke;
3981
3982 // Check whether the expected and actual arguments count match. If not,
3983 // setup registers according to contract with ArgumentsAdaptorTrampoline:
3984 // a0: actual arguments count
3985 // a1: function (passed through to callee)
3986 // a2: expected arguments count
3987
3988 // The code below is made a lot easier because the calling code already sets
3989 // up actual and expected registers according to the contract if values are
3990 // passed in registers.
3991 DCHECK(actual.is_immediate() || actual.reg().is(a0));
3992 DCHECK(expected.is_immediate() || expected.reg().is(a2));
3993
3994 if (expected.is_immediate()) {
3995 DCHECK(actual.is_immediate());
3996 li(a0, Operand(actual.immediate()));
3997 if (expected.immediate() == actual.immediate()) {
3998 definitely_matches = true;
3999 } else {
4000 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4001 if (expected.immediate() == sentinel) {
4002 // Don't worry about adapting arguments for builtins that
4003 // don't want that done. Skip adaptation code by making it look
4004 // like we have a match between expected and actual number of
4005 // arguments.
4006 definitely_matches = true;
4007 } else {
4008 *definitely_mismatches = true;
4009 li(a2, Operand(expected.immediate()));
4010 }
4011 }
4012 } else if (actual.is_immediate()) {
4013 li(a0, Operand(actual.immediate()));
4014 Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
4015 } else {
4016 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4017 }
4018
4019 if (!definitely_matches) {
4020 Handle<Code> adaptor =
4021 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4022 if (flag == CALL_FUNCTION) {
4023 call_wrapper.BeforeCall(CallSize(adaptor));
4024 Call(adaptor);
4025 call_wrapper.AfterCall();
4026 if (!*definitely_mismatches) {
4027 Branch(done);
4028 }
4029 } else {
4030 Jump(adaptor, RelocInfo::CODE_TARGET);
4031 }
4032 bind(&regular_invoke);
4033 }
4034 }
4035
4036
4037 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
4038 const ParameterCount& expected,
4039 const ParameterCount& actual) {
4040 Label skip_flooding;
4041 ExternalReference step_in_enabled =
4042 ExternalReference::debug_step_in_enabled_address(isolate());
4043 li(t0, Operand(step_in_enabled));
4044 lb(t0, MemOperand(t0));
4045 Branch(&skip_flooding, eq, t0, Operand(zero_reg));
4046 {
4047 FrameScope frame(this,
4048 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4049 if (expected.is_reg()) {
4050 SmiTag(expected.reg());
4051 Push(expected.reg());
4052 }
4053 if (actual.is_reg()) {
4054 SmiTag(actual.reg());
4055 Push(actual.reg());
4056 }
4057 if (new_target.is_valid()) {
4058 Push(new_target);
4059 }
4060 Push(fun);
4061 Push(fun);
4062 CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
4063 Pop(fun);
4064 if (new_target.is_valid()) {
4065 Pop(new_target);
4066 }
4067 if (actual.is_reg()) {
4068 Pop(actual.reg());
4069 SmiUntag(actual.reg());
4070 }
4071 if (expected.is_reg()) {
4072 Pop(expected.reg());
4073 SmiUntag(expected.reg());
4074 }
4075 }
4076 bind(&skip_flooding);
4077 }
4078
4079
4080 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4081 const ParameterCount& expected,
4082 const ParameterCount& actual,
4083 InvokeFlag flag,
4084 const CallWrapper& call_wrapper) {
4085 // You can't call a function without a valid frame.
4086 DCHECK(flag == JUMP_FUNCTION || has_frame());
4087 DCHECK(function.is(a1));
4088 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
4089
4090 if (call_wrapper.NeedsDebugStepCheck()) {
4091 FloodFunctionIfStepping(function, new_target, expected, actual);
4092 }
4093
4094 // Clear the new.target register if not given.
4095 if (!new_target.is_valid()) {
4096 LoadRoot(a3, Heap::kUndefinedValueRootIndex);
4097 }
4098
4099 Label done;
4100 bool definitely_mismatches = false;
4101 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
4102 call_wrapper);
4103 if (!definitely_mismatches) {
4104 // We call indirectly through the code field in the function to
4105 // allow recompilation to take effect without changing any of the
4106 // call sites.
4107 Register code = t0;
4108 lw(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4109 if (flag == CALL_FUNCTION) {
4110 call_wrapper.BeforeCall(CallSize(code));
4111 Call(code);
4112 call_wrapper.AfterCall();
4113 } else {
4114 DCHECK(flag == JUMP_FUNCTION);
4115 Jump(code);
4116 }
4117 // Continue here if InvokePrologue does handle the invocation due to
4118 // mismatched parameter counts.
4119 bind(&done);
4120 }
4121 }
4122
4123
4124 void MacroAssembler::InvokeFunction(Register function,
4125 Register new_target,
4126 const ParameterCount& actual,
4127 InvokeFlag flag,
4128 const CallWrapper& call_wrapper) {
4129 // You can't call a function without a valid frame.
4130 DCHECK(flag == JUMP_FUNCTION || has_frame());
4131
4132 // Contract with called JS functions requires that function is passed in a1.
4133 DCHECK(function.is(a1));
4134 Register expected_reg = a2;
4135 Register temp_reg = t0;
4136
4137 lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4138 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4139 lw(expected_reg,
4140 FieldMemOperand(temp_reg,
4141 SharedFunctionInfo::kFormalParameterCountOffset));
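// The formal parameter count is stored as a smi; untag it into expected_reg.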
4142 sra(expected_reg, expected_reg, kSmiTagSize);
4143
4144 ParameterCount expected(expected_reg);
4145 InvokeFunctionCode(function, new_target, expected, actual, flag,
4146 call_wrapper);
4147 }
4148
4149
4150 void MacroAssembler::InvokeFunction(Register function,
4151 const ParameterCount& expected,
4152 const ParameterCount& actual,
4153 InvokeFlag flag,
4154 const CallWrapper& call_wrapper) {
4155 // You can't call a function without a valid frame.
4156 DCHECK(flag == JUMP_FUNCTION || has_frame());
4157
4158 // Contract with called JS functions requires that function is passed in a1.
4159 DCHECK(function.is(a1));
4160
4161 // Get the function and setup the context.
4162 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4163
4164 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
4165 }
4166
4167
4168 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4169 const ParameterCount& expected,
4170 const ParameterCount& actual,
4171 InvokeFlag flag,
4172 const CallWrapper& call_wrapper) {
4173 li(a1, function);
4174 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4175 }
4176
4177
4178 void MacroAssembler::IsObjectJSStringType(Register object,
4179 Register scratch,
4180 Label* fail) {
4181 DCHECK(kNotStringTag != 0);
4182
4183 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4184 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4185 And(scratch, scratch, Operand(kIsNotStringMask));
4186 Branch(fail, ne, scratch, Operand(zero_reg));
4187 }
4188
4189
4190 void MacroAssembler::IsObjectNameType(Register object,
4191 Register scratch,
4192 Label* fail) {
4193 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4194 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4195 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4196 }
4197
4198
4199 // ---------------------------------------------------------------------------
4200 // Support functions.
4201
4202
4203 void MacroAssembler::GetMapConstructor(Register result, Register map,
4204 Register temp, Register temp2) {
4205 Label done, loop;
4206 lw(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
4207 bind(&loop);
4208 JumpIfSmi(result, &done);
4209 GetObjectType(result, temp, temp2);
4210 Branch(&done, ne, temp2, Operand(MAP_TYPE));
4211 lw(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
4212 Branch(&loop);
4213 bind(&done);
4214 }
4215
4216
4217 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
4218 Register scratch, Label* miss) {
4219 // Get the prototype or initial map from the function.
4220 lw(result,
4221 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4222
4223 // If the prototype or initial map is the hole, don't return it and
4224 // simply miss the cache instead. This will allow us to allocate a
4225 // prototype object on-demand in the runtime system.
4226 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4227 Branch(miss, eq, result, Operand(t8));
4228
4229 // If the function does not have an initial map, we're done.
4230 Label done;
4231 GetObjectType(result, scratch, scratch);
4232 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4233
4234 // Get the prototype from the initial map.
4235 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
4236
4237 // All done.
4238 bind(&done);
4239 }
4240
4241
4242 void MacroAssembler::GetObjectType(Register object,
4243 Register map,
4244 Register type_reg) {
4245 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
4246 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4247 }
4248
4249
4250 // -----------------------------------------------------------------------------
4251 // Runtime calls.
4252
4253 void MacroAssembler::CallStub(CodeStub* stub,
4254 TypeFeedbackId ast_id,
4255 Condition cond,
4256 Register r1,
4257 const Operand& r2,
4258 BranchDelaySlot bd) {
4259 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4260 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4261 cond, r1, r2, bd);
4262 }
4263
4264
4265 void MacroAssembler::TailCallStub(CodeStub* stub,
4266 Condition cond,
4267 Register r1,
4268 const Operand& r2,
4269 BranchDelaySlot bd) {
4270 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4271 }
4272
4273
4274 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4275 return has_frame_ || !stub->SometimesSetsUpAFrame();
4276 }
4277
4278
4279 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4280 // If the hash field contains an array index pick it out. The assert checks
4281 // that the constants for the maximum number of digits for an array index
4282 // cached in the hash field and the number of bits reserved for it do not
4283 // conflict.
4284 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4285 (1 << String::kArrayIndexValueBits));
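// DecodeFieldToSmi extracts the ArrayIndexValueBits field from hash and leaves
// it smi-tagged in index.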
4286 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4287 }
4288
4289
4290 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4291 FPURegister result,
4292 Register scratch1,
4293 Register scratch2,
4294 Register heap_number_map,
4295 Label* not_number,
4296 ObjectToDoubleFlags flags) {
4297 Label done;
4298 if ((flags & OBJECT_NOT_SMI) == 0) {
4299 Label not_smi;
4300 JumpIfNotSmi(object, &not_smi);
4301 // Remove smi tag and convert to double.
4302 sra(scratch1, object, kSmiTagSize);
4303 mtc1(scratch1, result);
4304 cvt_d_w(result, result);
4305 Branch(&done);
4306 bind(&not_smi);
4307 }
4308 // Check for heap number and load double value from it.
4309 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4310 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4311
4312 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4313 // If exponent is all ones the number is either a NaN or +/-Infinity.
4314 Register exponent = scratch1;
4315 Register mask_reg = scratch2;
4316 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4317 li(mask_reg, HeapNumber::kExponentMask);
4318
4319 And(exponent, exponent, mask_reg);
4320 Branch(not_number, eq, exponent, Operand(mask_reg));
4321 }
4322 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4323 bind(&done);
4324 }
4325
4326
4327 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4328 FPURegister value,
4329 Register scratch1) {
4330 sra(scratch1, smi, kSmiTagSize);
4331 mtc1(scratch1, value);
4332 cvt_d_w(value, value);
4333 }
4334
4335
4336 static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
4337 Label* overflow_label,
4338 Label* no_overflow_label) {
4339 DCHECK(overflow_label || no_overflow_label);
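// On entry, overflow_dst is expected to have its sign bit set iff the preceding
// add/sub overflowed, so a signed compare against zero selects the branch.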
4340 if (!overflow_label) {
4341 DCHECK(no_overflow_label);
4342 masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
4343 } else {
4344 masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
4345 if (no_overflow_label) masm->Branch(no_overflow_label);
4346 }
4347 }
4348
4349
4350 void MacroAssembler::AddBranchOvf(Register dst, Register left,
4351 const Operand& right, Label* overflow_label,
4352 Label* no_overflow_label, Register scratch) {
4353 if (right.is_reg()) {
4354 AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
4355 scratch);
4356 } else {
4357 if (IsMipsArchVariant(kMips32r6)) {
4358 Register right_reg = t9;
4359 DCHECK(!left.is(right_reg));
4360 li(right_reg, Operand(right));
4361 AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
4362 } else {
4363 Register overflow_dst = t9;
4364 DCHECK(!dst.is(scratch));
4365 DCHECK(!dst.is(overflow_dst));
4366 DCHECK(!scratch.is(overflow_dst));
4367 DCHECK(!left.is(overflow_dst));
4368 if (dst.is(left)) {
4369 mov(scratch, left); // Preserve left.
4370 Addu(dst, left, right.immediate()); // Left is overwritten.
4371 xor_(scratch, dst, scratch); // Original left.
4372 // Load right since xori takes uint16 as immediate.
4373 Addu(overflow_dst, zero_reg, right);
4374 xor_(overflow_dst, dst, overflow_dst);
4375 and_(overflow_dst, overflow_dst, scratch);
4376 } else {
4377 Addu(dst, left, right.immediate());
4378 xor_(overflow_dst, dst, left);
4379 // Load right since xori takes uint16 as immediate.
4380 Addu(scratch, zero_reg, right);
4381 xor_(scratch, dst, scratch);
4382 and_(overflow_dst, scratch, overflow_dst);
4383 }
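// overflow_dst = (dst ^ left) & (dst ^ right); its sign bit is set exactly when
// both operands share a sign that differs from the result's, i.e. on signed overflow.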
4384 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
4385 }
4386 }
4387 }
4388
4389
4390 void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
4391 Label* overflow_label,
4392 Label* no_overflow_label, Register scratch) {
4393 if (IsMipsArchVariant(kMips32r6)) {
4394 if (!overflow_label) {
4395 DCHECK(no_overflow_label);
4396 DCHECK(!dst.is(scratch));
4397 Register left_reg = left.is(dst) ? scratch : left;
4398 Register right_reg = right.is(dst) ? t9 : right;
4399 DCHECK(!dst.is(left_reg));
4400 DCHECK(!dst.is(right_reg));
4401 Move(left_reg, left);
4402 Move(right_reg, right);
4403 addu(dst, left, right);
4404 bnvc(left_reg, right_reg, no_overflow_label);
4405 } else {
4406 bovc(left, right, overflow_label);
4407 addu(dst, left, right);
4408 if (no_overflow_label) bc(no_overflow_label);
4409 }
4410 } else {
4411 Register overflow_dst = t9;
4412 DCHECK(!dst.is(scratch));
4413 DCHECK(!dst.is(overflow_dst));
4414 DCHECK(!scratch.is(overflow_dst));
4415 DCHECK(!left.is(overflow_dst));
4416 DCHECK(!right.is(overflow_dst));
4417 DCHECK(!left.is(scratch));
4418 DCHECK(!right.is(scratch));
4419
4420 if (left.is(right) && dst.is(left)) {
4421 mov(overflow_dst, right);
4422 right = overflow_dst;
4423 }
4424
4425 if (dst.is(left)) {
4426 mov(scratch, left); // Preserve left.
4427 addu(dst, left, right); // Left is overwritten.
4428 xor_(scratch, dst, scratch); // Original left.
4429 xor_(overflow_dst, dst, right);
4430 and_(overflow_dst, overflow_dst, scratch);
4431 } else if (dst.is(right)) {
4432 mov(scratch, right); // Preserve right.
4433 addu(dst, left, right); // Right is overwritten.
4434 xor_(scratch, dst, scratch); // Original right.
4435 xor_(overflow_dst, dst, left);
4436 and_(overflow_dst, overflow_dst, scratch);
4437 } else {
4438 addu(dst, left, right);
4439 xor_(overflow_dst, dst, left);
4440 xor_(scratch, dst, right);
4441 and_(overflow_dst, scratch, overflow_dst);
4442 }
4443 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
4444 }
4445 }
4446
4447
4448 void MacroAssembler::SubBranchOvf(Register dst, Register left,
4449 const Operand& right, Label* overflow_label,
4450 Label* no_overflow_label, Register scratch) {
4451 DCHECK(overflow_label || no_overflow_label);
4452 if (right.is_reg()) {
4453 SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
4454 scratch);
4455 } else {
4456 Register overflow_dst = t9;
4457 DCHECK(!dst.is(scratch));
4458 DCHECK(!dst.is(overflow_dst));
4459 DCHECK(!scratch.is(overflow_dst));
4460 DCHECK(!left.is(overflow_dst));
4461 DCHECK(!left.is(scratch));
4462 if (dst.is(left)) {
4463 mov(scratch, left); // Preserve left.
4464 Subu(dst, left, right.immediate()); // Left is overwritten.
4465 // Load right since xori takes uint16 as immediate.
4466 Addu(overflow_dst, zero_reg, right);
4467 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
4468 xor_(scratch, dst, scratch); // scratch is original left.
4469 and_(overflow_dst, scratch, overflow_dst);
4470 } else {
4471 Subu(dst, left, right);
4472 xor_(overflow_dst, dst, left);
4473 // Load right since xori takes uint16 as immediate.
4474 Addu(scratch, zero_reg, right);
4475 xor_(scratch, left, scratch);
4476 and_(overflow_dst, scratch, overflow_dst);
4477 }
4478 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
4479 }
4480 }
4481
4482
4483 void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
4484 Label* overflow_label,
4485 Label* no_overflow_label, Register scratch) {
4486 DCHECK(overflow_label || no_overflow_label);
4487 Register overflow_dst = t9;
4488 DCHECK(!dst.is(scratch));
4489 DCHECK(!dst.is(overflow_dst));
4490 DCHECK(!scratch.is(overflow_dst));
4491 DCHECK(!overflow_dst.is(left));
4492 DCHECK(!overflow_dst.is(right));
4493 DCHECK(!scratch.is(left));
4494 DCHECK(!scratch.is(right));
4495
4496 // This happens with some crankshaft code. Since Subu works fine if
4497 // left == right, let's not make that restriction here.
4498 if (left.is(right)) {
4499 mov(dst, zero_reg);
4500 if (no_overflow_label) {
4501 Branch(no_overflow_label);
4502 }
4503 }
4504
4505 if (dst.is(left)) {
4506 mov(scratch, left); // Preserve left.
4507 subu(dst, left, right); // Left is overwritten.
4508 xor_(overflow_dst, dst, scratch); // scratch is original left.
4509 xor_(scratch, scratch, right); // scratch is original left.
4510 and_(overflow_dst, scratch, overflow_dst);
4511 } else if (dst.is(right)) {
4512 mov(scratch, right); // Preserve right.
4513 subu(dst, left, right); // Right is overwritten.
4514 xor_(overflow_dst, dst, left);
4515 xor_(scratch, left, scratch); // Original right.
4516 and_(overflow_dst, scratch, overflow_dst);
4517 } else {
4518 subu(dst, left, right);
4519 xor_(overflow_dst, dst, left);
4520 xor_(scratch, left, right);
4521 and_(overflow_dst, scratch, overflow_dst);
4522 }
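// overflow_dst = (dst ^ left) & (left ^ right); its sign bit is set exactly when
// the operands had different signs and the result's sign differs from left's,
// i.e. the signed subtraction overflowed.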
4523 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
4524 }
4525
4526
4527 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
4528 SaveFPRegsMode save_doubles,
4529 BranchDelaySlot bd) {
4530 // All parameters are on the stack. v0 has the return value after call.
4531
4532 // If the expected number of arguments of the runtime function is
4533 // constant, we check that the actual number of arguments match the
4534 // expectation.
4535 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4536
4537 // TODO(1236192): Most runtime routines don't need the number of
4538 // arguments passed in because it is constant. At some point we
4539 // should remove this need and make the runtime routine entry code
4540 // smarter.
4541 PrepareCEntryArgs(num_arguments);
4542 PrepareCEntryFunction(ExternalReference(f, isolate()));
4543 CEntryStub stub(isolate(), 1, save_doubles);
4544 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4545 }
4546
4547
4548 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4549 int num_arguments,
4550 BranchDelaySlot bd) {
4551 PrepareCEntryArgs(num_arguments);
4552 PrepareCEntryFunction(ext);
4553
4554 CEntryStub stub(isolate(), 1);
4555 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4556 }
4557
4558
4559 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
4560 const Runtime::Function* function = Runtime::FunctionForId(fid);
4561 DCHECK_EQ(1, function->result_size);
4562 if (function->nargs >= 0) {
4563 PrepareCEntryArgs(function->nargs);
4564 }
4565 JumpToExternalReference(ExternalReference(fid, isolate()));
4566 }
4567
4568
4569 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4570 BranchDelaySlot bd) {
4571 PrepareCEntryFunction(builtin);
4572 CEntryStub stub(isolate(), 1);
4573 Jump(stub.GetCode(),
4574 RelocInfo::CODE_TARGET,
4575 al,
4576 zero_reg,
4577 Operand(zero_reg),
4578 bd);
4579 }
4580
4581
4582 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
4583 const CallWrapper& call_wrapper) {
4584 // You can't call a builtin without a valid frame.
4585 DCHECK(flag == JUMP_FUNCTION || has_frame());
4586
4587 // Fake a parameter count to avoid emitting code to do the check.
4588 ParameterCount expected(0);
4589 LoadNativeContextSlot(native_context_index, a1);
4590 InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
4591 }
4592
4593
4594 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4595 Register scratch1, Register scratch2) {
4596 if (FLAG_native_code_counters && counter->Enabled()) {
4597 li(scratch1, Operand(value));
4598 li(scratch2, Operand(ExternalReference(counter)));
4599 sw(scratch1, MemOperand(scratch2));
4600 }
4601 }
4602
4603
4604 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4605 Register scratch1, Register scratch2) {
4606 DCHECK(value > 0);
4607 if (FLAG_native_code_counters && counter->Enabled()) {
4608 li(scratch2, Operand(ExternalReference(counter)));
4609 lw(scratch1, MemOperand(scratch2));
4610 Addu(scratch1, scratch1, Operand(value));
4611 sw(scratch1, MemOperand(scratch2));
4612 }
4613 }
4614
4615
4616 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4617 Register scratch1, Register scratch2) {
4618 DCHECK(value > 0);
4619 if (FLAG_native_code_counters && counter->Enabled()) {
4620 li(scratch2, Operand(ExternalReference(counter)));
4621 lw(scratch1, MemOperand(scratch2));
4622 Subu(scratch1, scratch1, Operand(value));
4623 sw(scratch1, MemOperand(scratch2));
4624 }
4625 }
4626
4627
4628 // -----------------------------------------------------------------------------
4629 // Debugging.
4630
4631 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4632 Register rs, Operand rt) {
4633 if (emit_debug_code())
4634 Check(cc, reason, rs, rt);
4635 }
4636
4637
4638 void MacroAssembler::AssertFastElements(Register elements) {
4639 if (emit_debug_code()) {
4640 DCHECK(!elements.is(at));
4641 Label ok;
4642 push(elements);
4643 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4644 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4645 Branch(&ok, eq, elements, Operand(at));
4646 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4647 Branch(&ok, eq, elements, Operand(at));
4648 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4649 Branch(&ok, eq, elements, Operand(at));
4650 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4651 bind(&ok);
4652 pop(elements);
4653 }
4654 }
4655
4656
4657 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4658 Register rs, Operand rt) {
4659 Label L;
4660 Branch(&L, cc, rs, rt);
4661 Abort(reason);
4662 // Will not return here.
4663 bind(&L);
4664 }
4665
4666
4667 void MacroAssembler::Abort(BailoutReason reason) {
4668 Label abort_start;
4669 bind(&abort_start);
4670 #ifdef DEBUG
4671 const char* msg = GetBailoutReason(reason);
4672 if (msg != NULL) {
4673 RecordComment("Abort message: ");
4674 RecordComment(msg);
4675 }
4676
4677 if (FLAG_trap_on_abort) {
4678 stop(msg);
4679 return;
4680 }
4681 #endif
4682
4683 li(a0, Operand(Smi::FromInt(reason)));
4684 push(a0);
4685 // Disable stub call restrictions to always allow calls to abort.
4686 if (!has_frame_) {
4687 // We don't actually want to generate a pile of code for this, so just
4688 // claim there is a stack frame, without generating one.
4689 FrameScope scope(this, StackFrame::NONE);
4690 CallRuntime(Runtime::kAbort, 1);
4691 } else {
4692 CallRuntime(Runtime::kAbort, 1);
4693 }
4694 // Will not return here.
4695 if (is_trampoline_pool_blocked()) {
4696 // If the calling code cares about the exact number of
4697 // instructions generated, we insert padding here to keep the size
4698 // of the Abort macro constant.
4699 // Currently in debug mode with debug_code enabled the number of
4700 // generated instructions is 10, so we use this as a maximum value.
4701 static const int kExpectedAbortInstructions = 10;
4702 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4703 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4704 while (abort_instructions++ < kExpectedAbortInstructions) {
4705 nop();
4706 }
4707 }
4708 }
4709
4710
4711 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4712 if (context_chain_length > 0) {
4713 // Move up the chain of contexts to the context containing the slot.
4714 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4715 for (int i = 1; i < context_chain_length; i++) {
4716 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4717 }
4718 } else {
4719 // Slot is in the current function context. Move it into the
4720 // destination register in case we store into it (the write barrier
4721 // cannot be allowed to destroy the context in cp).
4722 Move(dst, cp);
4723 }
4724 }
4725
4726
4727 void MacroAssembler::LoadTransitionedArrayMapConditional(
4728 ElementsKind expected_kind,
4729 ElementsKind transitioned_kind,
4730 Register map_in_out,
4731 Register scratch,
4732 Label* no_map_match) {
4733 DCHECK(IsFastElementsKind(expected_kind));
4734 DCHECK(IsFastElementsKind(transitioned_kind));
4735
4736 // Check that the function's map is the same as the expected cached map.
4737 lw(scratch, NativeContextMemOperand());
4738 lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
4739 Branch(no_map_match, ne, map_in_out, Operand(at));
4740
4741 // Use the transitioned cached map.
4742 lw(map_in_out,
4743 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
4744 }
4745
4746
4747 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
4748 lw(dst, NativeContextMemOperand());
4749 lw(dst, ContextMemOperand(dst, index));
4750 }
4751
4752
4753 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4754 Register map,
4755 Register scratch) {
4756 // Load the initial map. The global functions all have initial maps.
4757 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4758 if (emit_debug_code()) {
4759 Label ok, fail;
4760 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4761 Branch(&ok);
4762 bind(&fail);
4763 Abort(kGlobalFunctionsMustHaveInitialMap);
4764 bind(&ok);
4765 }
4766 }
4767
4768
4769 void MacroAssembler::StubPrologue() {
4770 Push(ra, fp, cp);
4771 Push(Smi::FromInt(StackFrame::STUB));
4772 // Adjust FP to point to saved FP.
4773 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4774 }
4775
4776
4777 void MacroAssembler::Prologue(bool code_pre_aging) {
4778 PredictableCodeSizeScope predictible_code_size_scope(
4779 this, kNoCodeAgeSequenceLength);
4780 // The following three instructions must remain together and unmodified
4781 // for code aging to work properly.
4782 if (code_pre_aging) {
4783 // Pre-age the code.
4784 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4785 nop(Assembler::CODE_AGE_MARKER_NOP);
4786 // Load the stub address to t9 and call it,
4787 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4788 li(t9,
4789 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4790 CONSTANT_SIZE);
4791 nop(); // Prevent jalr to jal optimization.
4792 jalr(t9, a0);
4793 nop(); // Branch delay slot nop.
4794 nop(); // Pad the empty space.
4795 } else {
4796 Push(ra, fp, cp, a1);
4797 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4798 // Adjust fp to point to caller's fp.
4799 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4800 }
4801 }
4802
4803
4804 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
4805 lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4806 lw(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
4807 lw(vector,
4808 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
4809 }
4810
4811
4812 void MacroAssembler::EnterFrame(StackFrame::Type type,
4813 bool load_constant_pool_pointer_reg) {
4814 // Out-of-line constant pool not implemented on mips.
4815 UNREACHABLE();
4816 }
4817
4818
4819 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4820 addiu(sp, sp, -5 * kPointerSize);
4821 li(t8, Operand(Smi::FromInt(type)));
4822 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4823 sw(ra, MemOperand(sp, 4 * kPointerSize));
4824 sw(fp, MemOperand(sp, 3 * kPointerSize));
4825 sw(cp, MemOperand(sp, 2 * kPointerSize));
4826 sw(t8, MemOperand(sp, 1 * kPointerSize));
4827 sw(t9, MemOperand(sp, 0 * kPointerSize));
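// Resulting layout, from sp upward: code object, frame-type smi, cp, caller fp, ra.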
4828 // Adjust FP to point to saved FP.
4829 Addu(fp, sp,
4830 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4831 }
4832
4833
4834 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4835 mov(sp, fp);
4836 lw(fp, MemOperand(sp, 0 * kPointerSize));
4837 lw(ra, MemOperand(sp, 1 * kPointerSize));
4838 addiu(sp, sp, 2 * kPointerSize);
4839 }
4840
4841
4842 void MacroAssembler::EnterExitFrame(bool save_doubles,
4843 int stack_space) {
4844 // Set up the frame structure on the stack.
4845 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4846 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4847 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4848
4849 // This is how the stack will look:
4850 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4851 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4852 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4853 // [fp - 1 (==kSPOffset)] - sp of the called function
4854 // [fp - 2 (==kCodeOffset)] - CodeObject
4855 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4856 // new stack (will contain saved ra)
4857
4858 // Save registers.
4859 addiu(sp, sp, -4 * kPointerSize);
4860 sw(ra, MemOperand(sp, 3 * kPointerSize));
4861 sw(fp, MemOperand(sp, 2 * kPointerSize));
4862 addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4863
4864 if (emit_debug_code()) {
4865 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4866 }
4867
4868 // Accessed from ExitFrame::code_slot.
4869 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4870 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4871
4872 // Save the frame pointer and the context in top.
4873 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4874 sw(fp, MemOperand(t8));
4875 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4876 sw(cp, MemOperand(t8));
4877
4878 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4879 if (save_doubles) {
4880 // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4881 DCHECK(kDoubleSize == frame_alignment);
4882 if (frame_alignment > 0) {
4883 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4884 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4885 }
4886 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4887 Subu(sp, sp, Operand(space));
4888 // Remember: we only need to save every 2nd double FPU value.
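// (In o32 FR=0 mode an even/odd FPU register pair holds one double, so the
// even-numbered registers cover every double value.)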
4889 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4890 FPURegister reg = FPURegister::from_code(i);
4891 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4892 }
4893 }
4894
4895 // Reserve place for the return address, stack space and an optional slot
4896 // (used by the DirectCEntryStub to hold the return value if a struct is
4897 // returned) and align the frame preparing for calling the runtime function.
4898 DCHECK(stack_space >= 0);
4899 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4900 if (frame_alignment > 0) {
4901 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4902 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4903 }
4904
4905 // Set the exit frame sp value to point just before the return address
4906 // location.
4907 addiu(at, sp, kPointerSize);
4908 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4909 }
4910
4911
4912 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
4913 bool restore_context, bool do_return,
4914 bool argument_count_is_length) {
4915 // Optionally restore all double registers.
4916 if (save_doubles) {
4917 // Remember: we only need to restore every 2nd double FPU value.
4918 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4919 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4920 FPURegister reg = FPURegister::from_code(i);
4921 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4922 }
4923 }
4924
4925 // Clear top frame.
4926 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4927 sw(zero_reg, MemOperand(t8));
4928
4929 // Restore current context from top and clear it in debug mode.
4930 if (restore_context) {
4931 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4932 lw(cp, MemOperand(t8));
4933 }
4934 #ifdef DEBUG
4935 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4936 sw(a3, MemOperand(t8));
4937 #endif
4938
4939 // Pop the arguments, restore registers, and return.
4940 mov(sp, fp); // Respect ABI stack constraint.
4941 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4942 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4943
4944 if (argument_count.is_valid()) {
4945 if (argument_count_is_length) {
4946 addu(sp, sp, argument_count);
4947 } else {
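// argument_count is a slot count here; scale it to bytes before adjusting sp.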
4948 sll(t8, argument_count, kPointerSizeLog2);
4949 addu(sp, sp, t8);
4950 }
4951 }
4952
4953 if (do_return) {
4954 Ret(USE_DELAY_SLOT);
4955 // If returning, the instruction in the delay slot will be the addiu below.
4956 }
4957 addiu(sp, sp, 8);
4958 }
4959
4960
4961 void MacroAssembler::InitializeNewString(Register string,
4962 Register length,
4963 Heap::RootListIndex map_index,
4964 Register scratch1,
4965 Register scratch2) {
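// Smi-tag the length before storing it into the string's length field.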
4966 sll(scratch1, length, kSmiTagSize);
4967 LoadRoot(scratch2, map_index);
4968 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4969 li(scratch1, Operand(String::kEmptyHashField));
4970 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4971 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4972 }
4973
4974
4975 int MacroAssembler::ActivationFrameAlignment() {
4976 #if V8_HOST_ARCH_MIPS
4977 // Running on the real platform. Use the alignment as mandated by the local
4978 // environment.
4979 // Note: This will break if we ever start generating snapshots on one Mips
4980 // platform for another Mips platform with a different alignment.
4981 return base::OS::ActivationFrameAlignment();
4982 #else // V8_HOST_ARCH_MIPS
4983 // If we are using the simulator then we should always align to the expected
4984 // alignment. As the simulator is used to generate snapshots we do not know
4985 // if the target platform will need alignment, so this is controlled from a
4986 // flag.
4987 return FLAG_sim_stack_alignment;
4988 #endif // V8_HOST_ARCH_MIPS
4989 }
4990
4991
4992 void MacroAssembler::AssertStackIsAligned() {
4993 if (emit_debug_code()) {
4994 const int frame_alignment = ActivationFrameAlignment();
4995 const int frame_alignment_mask = frame_alignment - 1;
4996
4997 if (frame_alignment > kPointerSize) {
4998 Label alignment_as_expected;
4999 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5000 andi(at, sp, frame_alignment_mask);
5001 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5002 // Don't use Check here, as it will call Runtime_Abort re-entering here.
5003 stop("Unexpected stack alignment");
5004 bind(&alignment_as_expected);
5005 }
5006 }
5007 }
5008
5009
5010 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5011 Register reg,
5012 Register scratch,
5013 Label* not_power_of_two_or_zero) {
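// reg is a power of two iff reg != 0 and (reg & (reg - 1)) == 0; the signed
// check on reg - 1 also sends zero to the failure label.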
5014 Subu(scratch, reg, Operand(1));
5015 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5016 scratch, Operand(zero_reg));
5017 and_(at, scratch, reg); // In the delay slot.
5018 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
5019 }
5020
5021
5022 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5023 DCHECK(!reg.is(overflow));
5024 mov(overflow, reg); // Save original value.
5025 SmiTag(reg);
5026 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
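// Tagging shifts the value left by one; the xor is negative exactly when the
// shift changed the sign bit, i.e. the value does not fit in a smi.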
5027 }
5028
5029
5030 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5031 Register src,
5032 Register overflow) {
5033 if (dst.is(src)) {
5034 // Fall back to slower case.
5035 SmiTagCheckOverflow(dst, overflow);
5036 } else {
5037 DCHECK(!dst.is(src));
5038 DCHECK(!dst.is(overflow));
5039 DCHECK(!src.is(overflow));
5040 SmiTag(dst, src);
5041 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5042 }
5043 }
5044
5045
5046 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5047 Register src,
5048 Label* smi_case) {
5049 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5050 SmiUntag(dst, src);
5051 }
5052
5053
5054 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5055 Register src,
5056 Label* non_smi_case) {
5057 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5058 SmiUntag(dst, src);
5059 }
5060
5061 void MacroAssembler::JumpIfSmi(Register value,
5062 Label* smi_label,
5063 Register scratch,
5064 BranchDelaySlot bd) {
5065 DCHECK_EQ(0, kSmiTag);
5066 andi(scratch, value, kSmiTagMask);
5067 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5068 }
5069
5070 void MacroAssembler::JumpIfNotSmi(Register value,
5071 Label* not_smi_label,
5072 Register scratch,
5073 BranchDelaySlot bd) {
5074 DCHECK_EQ(0, kSmiTag);
5075 andi(scratch, value, kSmiTagMask);
5076 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5077 }
5078
5079
5080 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5081 Register reg2,
5082 Label* on_not_both_smi) {
5083 STATIC_ASSERT(kSmiTag == 0);
5084 DCHECK_EQ(1, kSmiTagMask);
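// The tag bit of reg1 | reg2 is clear only if both tag bits are clear,
// i.e. only if both values are Smis.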
5085 or_(at, reg1, reg2);
5086 JumpIfNotSmi(at, on_not_both_smi);
5087 }
5088
5089
5090 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5091 Register reg2,
5092 Label* on_either_smi) {
5093 STATIC_ASSERT(kSmiTag == 0);
5094 DCHECK_EQ(1, kSmiTagMask);
5095 // The AND below has its Smi tag bit set only if both tags are 1 (neither is a Smi).
5096 and_(at, reg1, reg2);
5097 JumpIfSmi(at, on_either_smi);
5098 }
5099
5100
5101 void MacroAssembler::AssertNotSmi(Register object) {
5102 if (emit_debug_code()) {
5103 STATIC_ASSERT(kSmiTag == 0);
5104 andi(at, object, kSmiTagMask);
5105 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5106 }
5107 }
5108
5109
5110 void MacroAssembler::AssertSmi(Register object) {
5111 if (emit_debug_code()) {
5112 STATIC_ASSERT(kSmiTag == 0);
5113 andi(at, object, kSmiTagMask);
5114 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5115 }
5116 }
5117
5118
5119 void MacroAssembler::AssertString(Register object) {
5120 if (emit_debug_code()) {
5121 STATIC_ASSERT(kSmiTag == 0);
5122 SmiTst(object, t8);
5123 Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
5124 GetObjectType(object, t8, t8);
5125 Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
5126 }
5127 }
5128
5129
5130 void MacroAssembler::AssertName(Register object) {
5131 if (emit_debug_code()) {
5132 STATIC_ASSERT(kSmiTag == 0);
5133 SmiTst(object, t8);
5134 Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
5135 GetObjectType(object, t8, t8);
5136 Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
5137 }
5138 }
5139
5140
5141 void MacroAssembler::AssertFunction(Register object) {
5142 if (emit_debug_code()) {
5143 STATIC_ASSERT(kSmiTag == 0);
5144 SmiTst(object, t8);
5145 Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
5146 GetObjectType(object, t8, t8);
5147 Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
5148 }
5149 }
5150
5151
5152 void MacroAssembler::AssertBoundFunction(Register object) {
5153 if (emit_debug_code()) {
5154 STATIC_ASSERT(kSmiTag == 0);
5155 SmiTst(object, t8);
5156 Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
5157 GetObjectType(object, t8, t8);
5158 Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
5159 }
5160 }
5161
5162
5163 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5164 Register scratch) {
5165 if (emit_debug_code()) {
5166 Label done_checking;
5167 AssertNotSmi(object);
5168 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5169 Branch(&done_checking, eq, object, Operand(scratch));
5170 lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
5171 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5172 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
5173 bind(&done_checking);
5174 }
5175 }
5176
5177
5178 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5179 if (emit_debug_code()) {
5180 DCHECK(!reg.is(at));
5181 LoadRoot(at, index);
5182 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5183 }
5184 }
5185
5186
5187 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5188 Register heap_number_map,
5189 Register scratch,
5190 Label* on_not_heap_number) {
5191 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5192 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5193 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5194 }
5195
5196
5197 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5198 Register first, Register second, Register scratch1, Register scratch2,
5199 Label* failure) {
5200 // Test that both first and second are sequential one-byte strings.
5201 // Assume that they are non-smis.
5202 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5203 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5204 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5205 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5206
5207 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5208 scratch2, failure);
5209 }
5210
5211
5212 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5213 Register second,
5214 Register scratch1,
5215 Register scratch2,
5216 Label* failure) {
5217 // Check that neither is a smi.
5218 STATIC_ASSERT(kSmiTag == 0);
5219 And(scratch1, first, Operand(second));
5220 JumpIfSmi(scratch1, failure);
5221 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5222 scratch2, failure);
5223 }
5224
5225
5226 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5227 Register first, Register second, Register scratch1, Register scratch2,
5228 Label* failure) {
5229 const int kFlatOneByteStringMask =
5230 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5231 const int kFlatOneByteStringTag =
5232 kStringTag | kOneByteStringTag | kSeqStringTag;
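// A sequential one-byte string must have the string tag, the sequential
// representation and the one-byte encoding bits all at their expected values,
// so the masked instance type must equal kFlatOneByteStringTag exactly.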
5233 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits in a 16-bit immediate.
5234 andi(scratch1, first, kFlatOneByteStringMask);
5235 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5236 andi(scratch2, second, kFlatOneByteStringMask);
5237 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5238 }
5239
5240
5241 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5242 Register scratch,
5243 Label* failure) {
5244 const int kFlatOneByteStringMask =
5245 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5246 const int kFlatOneByteStringTag =
5247 kStringTag | kOneByteStringTag | kSeqStringTag;
5248 And(scratch, type, Operand(kFlatOneByteStringMask));
5249 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
5250 }
5251
5252
5253 static const int kRegisterPassedArguments = 4;
5254
5255 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5256 int num_double_arguments) {
5257 int stack_passed_words = 0;
5258 num_reg_arguments += 2 * num_double_arguments;
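// Example: three integer arguments plus one double occupy 3 + 2 = 5
// register-sized slots, so one word spills to the stack; kCArgSlotCount
// reserved slots (four under the O32 ABI) are then added, giving five words.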
5259
5260 // Up to four simple arguments are passed in registers a0..a3.
5261 if (num_reg_arguments > kRegisterPassedArguments) {
5262 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5263 }
5264 stack_passed_words += kCArgSlotCount;
5265 return stack_passed_words;
5266 }
5267
5268
5269 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5270 Register index,
5271 Register value,
5272 Register scratch,
5273 uint32_t encoding_mask) {
5274 Label is_object;
5275 SmiTst(string, at);
5276 Check(ne, kNonObject, at, Operand(zero_reg));
5277
5278 lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5279 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5280
5281 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5282 li(scratch, Operand(encoding_mask));
5283 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5284
5285 // The index is assumed to come in untagged. Tag it to compare it with the
5286 // string length without using a temp register; it is restored at the end of
5287 // this function.
5288 Label index_tag_ok, index_tag_bad;
5289 TrySmiTag(index, scratch, &index_tag_bad);
5290 Branch(&index_tag_ok);
5291 bind(&index_tag_bad);
5292 Abort(kIndexIsTooLarge);
5293 bind(&index_tag_ok);
5294
5295 lw(at, FieldMemOperand(string, String::kLengthOffset));
5296 Check(lt, kIndexIsTooLarge, index, Operand(at));
5297
5298 DCHECK(Smi::FromInt(0) == 0);
5299 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5300
5301 SmiUntag(index, index);
5302 }
5303
5304
5305 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5306 int num_double_arguments,
5307 Register scratch) {
5308 int frame_alignment = ActivationFrameAlignment();
5309
5310 // Up to four simple arguments are passed in registers a0..a3.
5311 // Those four arguments must have reserved argument slots on the stack for
5312 // mips, even though those argument slots are not normally used.
5313 // Remaining arguments are pushed on the stack, above (higher address than)
5314 // the argument slots.
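// For example, with six integer arguments and 8-byte alignment,
// CalculateStackPassedWords returns 6: sp is lowered by 7 words (the
// arguments plus a slot for the caller's sp), rounded down to the alignment
// boundary, and the original sp is stored 6 words above the new sp, where
// CallCFunctionHelper later reloads it.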
5315 int stack_passed_arguments = CalculateStackPassedWords(
5316 num_reg_arguments, num_double_arguments);
5317 if (frame_alignment > kPointerSize) {
5318 // Make stack end at alignment and make room for num_arguments - 4 words
5319 // and the original value of sp.
5320 mov(scratch, sp);
5321 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5322 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5323 And(sp, sp, Operand(-frame_alignment));
5324 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5325 } else {
5326 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5327 }
5328 }
5329
5330
5331 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5332 Register scratch) {
5333 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5334 }
5335
5336
5337 void MacroAssembler::CallCFunction(ExternalReference function,
5338 int num_reg_arguments,
5339 int num_double_arguments) {
5340 li(t8, Operand(function));
5341 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5342 }
5343
5344
5345 void MacroAssembler::CallCFunction(Register function,
5346 int num_reg_arguments,
5347 int num_double_arguments) {
5348 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5349 }
5350
5351
5352 void MacroAssembler::CallCFunction(ExternalReference function,
5353 int num_arguments) {
5354 CallCFunction(function, num_arguments, 0);
5355 }
5356
5357
5358 void MacroAssembler::CallCFunction(Register function,
5359 int num_arguments) {
5360 CallCFunction(function, num_arguments, 0);
5361 }
5362
5363
5364 void MacroAssembler::CallCFunctionHelper(Register function,
5365 int num_reg_arguments,
5366 int num_double_arguments) {
5367 DCHECK(has_frame());
5368 // Make sure that the stack is aligned before calling a C function unless
5369 // running in the simulator. The simulator has its own alignment check which
5370 // provides more information.
5371 // The argument slots are presumed to have been set up by
5372 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5373
5374 #if V8_HOST_ARCH_MIPS
5375 if (emit_debug_code()) {
5376 int frame_alignment = base::OS::ActivationFrameAlignment();
5377 int frame_alignment_mask = frame_alignment - 1;
5378 if (frame_alignment > kPointerSize) {
5379 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5380 Label alignment_as_expected;
5381 And(at, sp, Operand(frame_alignment_mask));
5382 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5383 // Don't use Check here, as it will call Runtime_Abort possibly
5384 // re-entering here.
5385 stop("Unexpected alignment in CallCFunction");
5386 bind(&alignment_as_expected);
5387 }
5388 }
5389 #endif // V8_HOST_ARCH_MIPS
5390
5391 // Just call directly. The function called cannot cause a GC, or
5392 // allow preemption, so the return address in the link register
5393 // stays correct.
5394
5395 if (!function.is(t9)) {
5396 mov(t9, function);
5397 function = t9;
5398 }
5399
5400 Call(function);
5401
5402 int stack_passed_arguments = CalculateStackPassedWords(
5403 num_reg_arguments, num_double_arguments);
5404
5405 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5406 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5407 } else {
5408 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5409 }
5410 }
5411
5412
5413 #undef BRANCH_ARGS_CHECK
5414
5415
5416 void MacroAssembler::CheckPageFlag(
5417 Register object,
5418 Register scratch,
5419 int mask,
5420 Condition cc,
5421 Label* condition_met) {
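// Clearing the low Page::kPageAlignmentMask bits of the object address gives
// the start of its page, i.e. the MemoryChunk header; the flags word is then
// loaded from kFlagsOffset within that header and tested against the mask.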
5422 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5423 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5424 And(scratch, scratch, Operand(mask));
5425 Branch(condition_met, cc, scratch, Operand(zero_reg));
5426 }
5427
5428
5429 void MacroAssembler::JumpIfBlack(Register object,
5430 Register scratch0,
5431 Register scratch1,
5432 Label* on_black) {
5433 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
5434 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
5435 }
5436
5437
5438 void MacroAssembler::HasColor(Register object,
5439 Register bitmap_scratch,
5440 Register mask_scratch,
5441 Label* has_color,
5442 int first_bit,
5443 int second_bit) {
5444 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5445 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5446
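// An object's color is encoded in two consecutive bits of the page's marking
// bitmap; per the patterns asserted in JumpIfWhite below, white is 00, grey
// is 10 and black is 11, so (first_bit, second_bit) selects the color to test.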
5447 GetMarkBits(object, bitmap_scratch, mask_scratch);
5448
5449 Label other_color, word_boundary;
5450 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5451 And(t8, t9, Operand(mask_scratch));
5452 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5453 // Shift left 1 by adding.
5454 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5455 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5456 And(t8, t9, Operand(mask_scratch));
5457 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5458 jmp(&other_color);
5459
5460 bind(&word_boundary);
5461 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5462 And(t9, t9, Operand(1));
5463 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5464 bind(&other_color);
5465 }
5466
5467
5468 void MacroAssembler::GetMarkBits(Register addr_reg,
5469 Register bitmap_reg,
5470 Register mask_reg) {
5471 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
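// Roughly: bitmap_reg <- start of the object's page; mask_reg <- 1 shifted by
// the bit index within a bitmap cell (the address bits just above the pointer
// size); t8 <- the cell's byte offset within the bitmap, added to bitmap_reg.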
5472 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5473 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5474 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5475 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5476 sll(t8, t8, kPointerSizeLog2);
5477 Addu(bitmap_reg, bitmap_reg, t8);
5478 li(t8, Operand(1));
5479 sllv(mask_reg, t8, mask_reg);
5480 }
5481
5482
5483 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
5484 Register mask_scratch, Register load_scratch,
5485 Label* value_is_white) {
5486 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5487 GetMarkBits(value, bitmap_scratch, mask_scratch);
5488
5489 // If the value is black or grey we don't need to do anything.
5490 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5491 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
5492 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
5493 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5494
5495 // Since both black and grey have a 1 in the first position and white does
5496 // not have a 1 there we only need to check one bit.
5497 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5498 And(t8, mask_scratch, load_scratch);
5499 Branch(value_is_white, eq, t8, Operand(zero_reg));
5500 }
5501
5502
5503 void MacroAssembler::LoadInstanceDescriptors(Register map,
5504 Register descriptors) {
5505 lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5506 }
5507
5508
5509 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5510 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5511 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5512 }
5513
5514
5515 void MacroAssembler::EnumLength(Register dst, Register map) {
5516 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5517 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5518 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5519 SmiTag(dst);
5520 }
5521
5522
5523 void MacroAssembler::LoadAccessor(Register dst, Register holder,
5524 int accessor_index,
5525 AccessorComponent accessor) {
5526 lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
5527 LoadInstanceDescriptors(dst, dst);
5528 lw(dst,
5529 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
5530 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
5531 : AccessorPair::kSetterOffset;
5532 lw(dst, FieldMemOperand(dst, offset));
5533 }
5534
5535
5536 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5537 Register empty_fixed_array_value = t2;
5538 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5539 Label next, start;
5540 mov(a2, a0);
5541
5542 // Check if the enum length field is properly initialized, indicating that
5543 // there is an enum cache.
5544 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5545
5546 EnumLength(a3, a1);
5547 Branch(
5548 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5549
5550 jmp(&start);
5551
5552 bind(&next);
5553 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5554
5555 // For all objects but the receiver, check that the cache is empty.
5556 EnumLength(a3, a1);
5557 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5558
5559 bind(&start);
5560
5561 // Check that there are no elements. Register a2 contains the current JS
5562 // object we've reached through the prototype chain.
5563 Label no_elements;
5564 lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5565 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5566
5567 // Second chance, the object may be using the empty slow element dictionary.
5568 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5569 Branch(call_runtime, ne, a2, Operand(at));
5570
5571 bind(&no_elements);
5572 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5573 Branch(&next, ne, a2, Operand(null_value));
5574 }
5575
5576
5577 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5578 DCHECK(!output_reg.is(input_reg));
5579 Label done;
5580 li(output_reg, Operand(255));
5581 // Normal branch: nop in delay slot.
5582 Branch(&done, gt, input_reg, Operand(output_reg));
5583 // Use delay slot in this branch.
5584 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5585 mov(output_reg, zero_reg); // In delay slot.
5586 mov(output_reg, input_reg); // Value is in range 0..255.
5587 bind(&done);
5588 }
5589
5590
5591 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5592 DoubleRegister input_reg,
5593 DoubleRegister temp_double_reg) {
5594 Label above_zero;
5595 Label done;
5596 Label in_bounds;
5597
5598 Move(temp_double_reg, 0.0);
5599 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5600
5601 // Double value is less than zero, NaN or Inf, return 0.
5602 mov(result_reg, zero_reg);
5603 Branch(&done);
5604
5605 // Double value is >= 255, return 255.
5606 bind(&above_zero);
5607 Move(temp_double_reg, 255.0);
5608 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5609 li(result_reg, Operand(255));
5610 Branch(&done);
5611
5612 // In 0-255 range, round and truncate.
5613 bind(&in_bounds);
5614 cvt_w_d(temp_double_reg, input_reg);
5615 mfc1(result_reg, temp_double_reg);
5616 bind(&done);
5617 }
5618
5619
5620 void MacroAssembler::TestJSArrayForAllocationMemento(
5621 Register receiver_reg,
5622 Register scratch_reg,
5623 Label* no_memento_found,
5624 Condition cond,
5625 Label* allocation_memento_present) {
5626 ExternalReference new_space_start =
5627 ExternalReference::new_space_start(isolate());
5628 ExternalReference new_space_allocation_top =
5629 ExternalReference::new_space_allocation_top_address(isolate());
5630 Addu(scratch_reg, receiver_reg,
5631 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
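// scratch_reg now points just past where an AllocationMemento would directly
// follow the JSArray; the checks below only look at it when that address lies
// inside the currently used part of new space.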
5632 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5633 li(at, Operand(new_space_allocation_top));
5634 lw(at, MemOperand(at));
5635 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5636 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5637 if (allocation_memento_present) {
5638 Branch(allocation_memento_present, cond, scratch_reg,
5639 Operand(isolate()->factory()->allocation_memento_map()));
5640 }
5641 }
5642
5643
5644 Register GetRegisterThatIsNotOneOf(Register reg1,
5645 Register reg2,
5646 Register reg3,
5647 Register reg4,
5648 Register reg5,
5649 Register reg6) {
5650 RegList regs = 0;
5651 if (reg1.is_valid()) regs |= reg1.bit();
5652 if (reg2.is_valid()) regs |= reg2.bit();
5653 if (reg3.is_valid()) regs |= reg3.bit();
5654 if (reg4.is_valid()) regs |= reg4.bit();
5655 if (reg5.is_valid()) regs |= reg5.bit();
5656 if (reg6.is_valid()) regs |= reg6.bit();
5657
5658 const RegisterConfiguration* config =
5659 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
5660 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
5661 int code = config->GetAllocatableGeneralCode(i);
5662 Register candidate = Register::from_code(code);
5663 if (regs & candidate.bit()) continue;
5664 return candidate;
5665 }
5666 UNREACHABLE();
5667 return no_reg;
5668 }
5669
5670
5671 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5672 Register object,
5673 Register scratch0,
5674 Register scratch1,
5675 Label* found) {
5676 DCHECK(!scratch1.is(scratch0));
5677 Factory* factory = isolate()->factory();
5678 Register current = scratch0;
5679 Label loop_again, end;
5680
5681 // Use scratch0 to walk the prototype chain, starting from the object's map.
5682 Move(current, object);
5683 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5684 lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5685 Branch(&end, eq, current, Operand(factory->null_value()));
5686
5687 // Loop based on the map going up the prototype chain.
5688 bind(&loop_again);
5689 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5690 lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
5691 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
5692 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
5693 Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
5694 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5695 DecodeField<Map::ElementsKindBits>(scratch1);
5696 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5697 lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5698 Branch(&loop_again, ne, current, Operand(factory->null_value()));
5699
5700 bind(&end);
5701 }
5702
5703
5704 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
5705 Register reg5, Register reg6, Register reg7, Register reg8,
5706 Register reg9, Register reg10) {
5707 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
5708 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5709 reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
5710 reg10.is_valid();
5711
5712 RegList regs = 0;
5713 if (reg1.is_valid()) regs |= reg1.bit();
5714 if (reg2.is_valid()) regs |= reg2.bit();
5715 if (reg3.is_valid()) regs |= reg3.bit();
5716 if (reg4.is_valid()) regs |= reg4.bit();
5717 if (reg5.is_valid()) regs |= reg5.bit();
5718 if (reg6.is_valid()) regs |= reg6.bit();
5719 if (reg7.is_valid()) regs |= reg7.bit();
5720 if (reg8.is_valid()) regs |= reg8.bit();
5721 if (reg9.is_valid()) regs |= reg9.bit();
5722 if (reg10.is_valid()) regs |= reg10.bit();
5723 int n_of_non_aliasing_regs = NumRegs(regs);
5724
5725 return n_of_valid_regs != n_of_non_aliasing_regs;
5726 }
5727
5728
5729 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
5730 FlushICache flush_cache)
5731 : address_(address),
5732 size_(instructions * Assembler::kInstrSize),
5733 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
5734 flush_cache_(flush_cache) {
5735 // Create a new macro assembler pointing to the address of the code to patch.
5736 // The size is adjusted with kGap in order for the assembler to generate size
5737 // bytes of instructions without failing with buffer size constraints.
5738 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5739 }
5740
5741
5742 CodePatcher::~CodePatcher() {
5743 // Indicate that code has changed.
5744 if (flush_cache_ == FLUSH) {
5745 Assembler::FlushICache(masm_.isolate(), address_, size_);
5746 }
5747
5748 // Check that the code was patched as expected.
5749 DCHECK(masm_.pc_ == address_ + size_);
5750 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5751 }
5752
5753
5754 void CodePatcher::Emit(Instr instr) {
5755 masm()->emit(instr);
5756 }
5757
5758
5759 void CodePatcher::Emit(Address addr) {
5760 masm()->emit(reinterpret_cast<Instr>(addr));
5761 }
5762
5763
5764 void CodePatcher::ChangeBranchCondition(Instr current_instr,
5765 uint32_t new_opcode) {
5766 current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
5767 masm_.emit(current_instr);
5768 }
5769
5770
5771 void MacroAssembler::TruncatingDiv(Register result,
5772 Register dividend,
5773 int32_t divisor) {
5774 DCHECK(!dividend.is(result));
5775 DCHECK(!dividend.is(at));
5776 DCHECK(!result.is(at));
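// Signed division by a constant in the Granlund-Montgomery style: multiply by
// a precomputed magic multiplier, keep the high 32 bits, correct when the
// multiplier's sign bit is set, arithmetic-shift by mag.shift, and add the
// dividend's sign bit so the quotient truncates toward zero. For example,
// dividing by 3 typically uses multiplier 0x55555556 with shift 0.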
5777 base::MagicNumbersForDivision<uint32_t> mag =
5778 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5779 li(at, Operand(mag.multiplier));
5780 Mulh(result, dividend, Operand(at));
5781 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5782 if (divisor > 0 && neg) {
5783 Addu(result, result, Operand(dividend));
5784 }
5785 if (divisor < 0 && !neg && mag.multiplier > 0) {
5786 Subu(result, result, Operand(dividend));
5787 }
5788 if (mag.shift > 0) sra(result, result, mag.shift);
5789 srl(at, dividend, 31);
5790 Addu(result, result, Operand(at));
5791 }
5792
5793
5794 } // namespace internal
5795 } // namespace v8
5796
5797 #endif // V8_TARGET_ARCH_MIPS
5798