1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6
7 #if V8_TARGET_ARCH_MIPS
8
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/debug/debug.h"
14 #include "src/mips/macro-assembler-mips.h"
15 #include "src/register-configuration.h"
16 #include "src/runtime/runtime.h"
17
18 namespace v8 {
19 namespace internal {
20
21 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
22 CodeObjectRequired create_code_object)
23 : Assembler(arg_isolate, buffer, size),
24 generating_stub_(false),
25 has_frame_(false),
26 has_double_zero_reg_set_(false) {
27 if (create_code_object == CodeObjectRequired::kYes) {
28 code_object_ =
29 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
30 }
31 }
32
33 void MacroAssembler::Load(Register dst,
34 const MemOperand& src,
35 Representation r) {
36 DCHECK(!r.IsDouble());
37 if (r.IsInteger8()) {
38 lb(dst, src);
39 } else if (r.IsUInteger8()) {
40 lbu(dst, src);
41 } else if (r.IsInteger16()) {
42 lh(dst, src);
43 } else if (r.IsUInteger16()) {
44 lhu(dst, src);
45 } else {
46 lw(dst, src);
47 }
48 }
49
50
51 void MacroAssembler::Store(Register src,
52 const MemOperand& dst,
53 Representation r) {
54 DCHECK(!r.IsDouble());
55 if (r.IsInteger8() || r.IsUInteger8()) {
56 sb(src, dst);
57 } else if (r.IsInteger16() || r.IsUInteger16()) {
58 sh(src, dst);
59 } else {
60 if (r.IsHeapObject()) {
61 AssertNotSmi(src);
62 } else if (r.IsSmi()) {
63 AssertSmi(src);
64 }
65 sw(src, dst);
66 }
67 }
68
69 void MacroAssembler::LoadRoot(Register destination,
70 Heap::RootListIndex index) {
71 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
72 }
73
74
75 void MacroAssembler::LoadRoot(Register destination,
76 Heap::RootListIndex index,
77 Condition cond,
78 Register src1, const Operand& src2) {
79 Branch(2, NegateCondition(cond), src1, src2);
80 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
81 }
82
83
84 void MacroAssembler::StoreRoot(Register source,
85 Heap::RootListIndex index) {
86 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
87 sw(source, MemOperand(s6, index << kPointerSizeLog2));
88 }
89
90
91 void MacroAssembler::StoreRoot(Register source,
92 Heap::RootListIndex index,
93 Condition cond,
94 Register src1, const Operand& src2) {
95 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
96 Branch(2, NegateCondition(cond), src1, src2);
97 sw(source, MemOperand(s6, index << kPointerSizeLog2));
98 }
99
100 void MacroAssembler::PushCommonFrame(Register marker_reg) {
101 if (marker_reg.is_valid()) {
102 Push(ra, fp, marker_reg);
103 Addu(fp, sp, Operand(kPointerSize));
104 } else {
105 Push(ra, fp);
106 mov(fp, sp);
107 }
108 }
109
110 void MacroAssembler::PopCommonFrame(Register marker_reg) {
111 if (marker_reg.is_valid()) {
112 Pop(ra, fp, marker_reg);
113 } else {
114 Pop(ra, fp);
115 }
116 }
117
118 void MacroAssembler::PushStandardFrame(Register function_reg) {
119 int offset = -StandardFrameConstants::kContextOffset;
120 if (function_reg.is_valid()) {
121 Push(ra, fp, cp, function_reg);
122 offset += kPointerSize;
123 } else {
124 Push(ra, fp, cp);
125 }
126 Addu(fp, sp, Operand(offset));
127 }
128
129 // Push and pop all registers that can hold pointers.
130 void MacroAssembler::PushSafepointRegisters() {
131 // Safepoints expect a block of kNumSafepointRegisters values on the
132 // stack, so adjust the stack for unsaved registers.
133 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
134 DCHECK(num_unsaved >= 0);
135 if (num_unsaved > 0) {
136 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
137 }
138 MultiPush(kSafepointSavedRegisters);
139 }
140
141
142 void MacroAssembler::PopSafepointRegisters() {
143 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
144 MultiPop(kSafepointSavedRegisters);
145 if (num_unsaved > 0) {
146 Addu(sp, sp, Operand(num_unsaved * kPointerSize));
147 }
148 }
149
150
151 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
152 sw(src, SafepointRegisterSlot(dst));
153 }
154
155
156 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
157 lw(dst, SafepointRegisterSlot(src));
158 }
159
160
161 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
162 // The registers are pushed starting with the highest encoding,
163 // which means that lowest encodings are closest to the stack pointer.
164 return kSafepointRegisterStackIndexMap[reg_code];
165 }
166
167
168 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
169 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
170 }
171
172
173 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
174 UNIMPLEMENTED_MIPS();
175 // General purpose registers are pushed last on the stack.
176 int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
177 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
178 return MemOperand(sp, doubles_size + register_offset);
179 }
180
181
182 void MacroAssembler::InNewSpace(Register object,
183 Register scratch,
184 Condition cc,
185 Label* branch) {
186 DCHECK(cc == eq || cc == ne);
187 CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
188 }
189
190
191 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
192 // The register 'object' contains a heap object pointer. The heap object
193 // tag is shifted away.
194 void MacroAssembler::RecordWriteField(
195 Register object,
196 int offset,
197 Register value,
198 Register dst,
199 RAStatus ra_status,
200 SaveFPRegsMode save_fp,
201 RememberedSetAction remembered_set_action,
202 SmiCheck smi_check,
203 PointersToHereCheck pointers_to_here_check_for_value) {
204 DCHECK(!AreAliased(value, dst, t8, object));
205 // First, check if a write barrier is even needed. The tests below
206 // catch stores of Smis.
207 Label done;
208
209 // Skip barrier if writing a smi.
210 if (smi_check == INLINE_SMI_CHECK) {
211 JumpIfSmi(value, &done);
212 }
213
214 // Although the object register is tagged, the offset is relative to the start
215 // of the object, so the offset must be a multiple of kPointerSize.
216 DCHECK(IsAligned(offset, kPointerSize));
217
218 Addu(dst, object, Operand(offset - kHeapObjectTag));
219 if (emit_debug_code()) {
220 Label ok;
221 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
222 Branch(&ok, eq, t8, Operand(zero_reg));
223 stop("Unaligned cell in write barrier");
224 bind(&ok);
225 }
226
227 RecordWrite(object,
228 dst,
229 value,
230 ra_status,
231 save_fp,
232 remembered_set_action,
233 OMIT_SMI_CHECK,
234 pointers_to_here_check_for_value);
235
236 bind(&done);
237
238 // Clobber clobbered input registers when running with the debug-code flag
239 // turned on to provoke errors.
240 if (emit_debug_code()) {
241 li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
242 li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
243 }
244 }
245
246
247 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
248 void MacroAssembler::RecordWriteForMap(Register object,
249 Register map,
250 Register dst,
251 RAStatus ra_status,
252 SaveFPRegsMode fp_mode) {
253 if (emit_debug_code()) {
254 DCHECK(!dst.is(at));
255 lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
256 Check(eq,
257 kWrongAddressOrValuePassedToRecordWrite,
258 dst,
259 Operand(isolate()->factory()->meta_map()));
260 }
261
262 if (!FLAG_incremental_marking) {
263 return;
264 }
265
266 if (emit_debug_code()) {
267 lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
268 Check(eq,
269 kWrongAddressOrValuePassedToRecordWrite,
270 map,
271 Operand(at));
272 }
273
274 Label done;
275
276 // A single check of the map's pages interesting flag suffices, since it is
277 // only set during incremental collection, and then it's also guaranteed that
278 // the from object's page's interesting flag is also set. This optimization
279 // relies on the fact that maps can never be in new space.
280 CheckPageFlag(map,
281 map, // Used as scratch.
282 MemoryChunk::kPointersToHereAreInterestingMask,
283 eq,
284 &done);
285
286 Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
287 if (emit_debug_code()) {
288 Label ok;
289 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
290 Branch(&ok, eq, at, Operand(zero_reg));
291 stop("Unaligned cell in write barrier");
292 bind(&ok);
293 }
294
295 // Record the actual write.
296 if (ra_status == kRAHasNotBeenSaved) {
297 push(ra);
298 }
299 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
300 fp_mode);
301 CallStub(&stub);
302 if (ra_status == kRAHasNotBeenSaved) {
303 pop(ra);
304 }
305
306 bind(&done);
307
308 // Count number of write barriers in generated code.
309 isolate()->counters()->write_barriers_static()->Increment();
310 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
311
312 // Clobber clobbered registers when running with the debug-code flag
313 // turned on to provoke errors.
314 if (emit_debug_code()) {
315 li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
316 li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
317 }
318 }
319
320
321 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
322 // The register 'object' contains a heap object pointer. The heap object
323 // tag is shifted away.
324 void MacroAssembler::RecordWrite(
325 Register object,
326 Register address,
327 Register value,
328 RAStatus ra_status,
329 SaveFPRegsMode fp_mode,
330 RememberedSetAction remembered_set_action,
331 SmiCheck smi_check,
332 PointersToHereCheck pointers_to_here_check_for_value) {
333 DCHECK(!AreAliased(object, address, value, t8));
334 DCHECK(!AreAliased(object, address, value, t9));
335
336 if (emit_debug_code()) {
337 lw(at, MemOperand(address));
338 Assert(
339 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
340 }
341
342 if (remembered_set_action == OMIT_REMEMBERED_SET &&
343 !FLAG_incremental_marking) {
344 return;
345 }
346
347 // First, check if a write barrier is even needed. The tests below
348 // catch stores of smis and stores into the young generation.
349 Label done;
350
351 if (smi_check == INLINE_SMI_CHECK) {
352 DCHECK_EQ(0, kSmiTag);
353 JumpIfSmi(value, &done);
354 }
355
356 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
357 CheckPageFlag(value,
358 value, // Used as scratch.
359 MemoryChunk::kPointersToHereAreInterestingMask,
360 eq,
361 &done);
362 }
363 CheckPageFlag(object,
364 value, // Used as scratch.
365 MemoryChunk::kPointersFromHereAreInterestingMask,
366 eq,
367 &done);
368
369 // Record the actual write.
370 if (ra_status == kRAHasNotBeenSaved) {
371 push(ra);
372 }
373 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
374 fp_mode);
375 CallStub(&stub);
376 if (ra_status == kRAHasNotBeenSaved) {
377 pop(ra);
378 }
379
380 bind(&done);
381
382 // Count number of write barriers in generated code.
383 isolate()->counters()->write_barriers_static()->Increment();
384 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
385 value);
386
387 // Clobber clobbered registers when running with the debug-code flag
388 // turned on to provoke errors.
389 if (emit_debug_code()) {
390 li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
391 li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
392 }
393 }
394
395 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
396 Register code_entry,
397 Register scratch) {
398 const int offset = JSFunction::kCodeEntryOffset;
399
400 // Since a code entry (value) is always in old space, we don't need to update
401 // the remembered set. If incremental marking is off, there is nothing for us to
402 // do.
403 if (!FLAG_incremental_marking) return;
404
405 DCHECK(js_function.is(a1));
406 DCHECK(code_entry.is(t0));
407 DCHECK(scratch.is(t1));
408 AssertNotSmi(js_function);
409
410 if (emit_debug_code()) {
411 Addu(scratch, js_function, Operand(offset - kHeapObjectTag));
412 lw(at, MemOperand(scratch));
413 Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
414 Operand(code_entry));
415 }
416
417 // First, check if a write barrier is even needed. The tests below
418 // catch stores of Smis and stores into young gen.
419 Label done;
420
421 CheckPageFlag(code_entry, scratch,
422 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
423 CheckPageFlag(js_function, scratch,
424 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
425
426 const Register dst = scratch;
427 Addu(dst, js_function, Operand(offset - kHeapObjectTag));
428
429 // Save caller-saved registers. js_function and code_entry are in the
430 // caller-saved register list.
431 DCHECK(kJSCallerSaved & js_function.bit());
432 DCHECK(kJSCallerSaved & code_entry.bit());
433 MultiPush(kJSCallerSaved | ra.bit());
434
435 int argument_count = 3;
436
437 PrepareCallCFunction(argument_count, 0, code_entry);
438
439 mov(a0, js_function);
440 mov(a1, dst);
441 li(a2, Operand(ExternalReference::isolate_address(isolate())));
442
443 {
444 AllowExternalCallThatCantCauseGC scope(this);
445 CallCFunction(
446 ExternalReference::incremental_marking_record_write_code_entry_function(
447 isolate()),
448 argument_count);
449 }
450
451 // Restore caller-saved registers.
452 MultiPop(kJSCallerSaved | ra.bit());
453
454 bind(&done);
455 }
456
457 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
458 Register address,
459 Register scratch,
460 SaveFPRegsMode fp_mode,
461 RememberedSetFinalAction and_then) {
462 Label done;
463 if (emit_debug_code()) {
464 Label ok;
465 JumpIfNotInNewSpace(object, scratch, &ok);
466 stop("Remembered set pointer is in new space");
467 bind(&ok);
468 }
469 // Load store buffer top.
470 ExternalReference store_buffer =
471 ExternalReference::store_buffer_top(isolate());
472 li(t8, Operand(store_buffer));
473 lw(scratch, MemOperand(t8));
474 // Store pointer to buffer and increment buffer top.
475 sw(address, MemOperand(scratch));
476 Addu(scratch, scratch, kPointerSize);
477 // Write back new top of buffer.
478 sw(scratch, MemOperand(t8));
479 // Call stub on end of buffer.
480 // Check for end of buffer.
481 And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
482 if (and_then == kFallThroughAtEnd) {
483 Branch(&done, ne, t8, Operand(zero_reg));
484 } else {
485 DCHECK(and_then == kReturnAtEnd);
486 Ret(ne, t8, Operand(zero_reg));
487 }
488 push(ra);
489 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
490 CallStub(&store_buffer_overflow);
491 pop(ra);
492 bind(&done);
493 if (and_then == kReturnAtEnd) {
494 Ret();
495 }
496 }
497
498
499 // -----------------------------------------------------------------------------
500 // Allocation support.
501
502
503 // Compute the hash code from the untagged key. This must be kept in sync with
504 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
505 // code-stub-hydrogen.cc
506 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
507 // First of all we assign the hash seed to scratch.
508 LoadRoot(scratch, Heap::kHashSeedRootIndex);
509 SmiUntag(scratch);
510
511 // Xor original key with a seed.
512 xor_(reg0, reg0, scratch);
513
514 // Compute the hash code from the untagged key. This must be kept in sync
515 // with ComputeIntegerHash in utils.h.
516 //
517 // hash = ~hash + (hash << 15);
518 nor(scratch, reg0, zero_reg);
519 Lsa(reg0, scratch, reg0, 15);
520
521 // hash = hash ^ (hash >> 12);
522 srl(at, reg0, 12);
523 xor_(reg0, reg0, at);
524
525 // hash = hash + (hash << 2);
526 Lsa(reg0, reg0, reg0, 2);
527
528 // hash = hash ^ (hash >> 4);
529 srl(at, reg0, 4);
530 xor_(reg0, reg0, at);
531
532 // hash = hash * 2057;
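// 2057 = 2048 + 8 + 1, so this computes (hash << 11) + (hash << 3) + hash.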
533 sll(scratch, reg0, 11);
534 Lsa(reg0, reg0, reg0, 3);
535 addu(reg0, reg0, scratch);
536
537 // hash = hash ^ (hash >> 16);
538 srl(at, reg0, 16);
539 xor_(reg0, reg0, at);
540 And(reg0, reg0, Operand(0x3fffffff));
541 }
542
543 // ---------------------------------------------------------------------------
544 // Instruction macros.
545
546 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
547 if (rt.is_reg()) {
548 addu(rd, rs, rt.rm());
549 } else {
550 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
551 addiu(rd, rs, rt.imm32_);
552 } else {
553 // li handles the relocation.
554 DCHECK(!rs.is(at));
555 li(at, rt);
556 addu(rd, rs, at);
557 }
558 }
559 }
560
561
562 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
563 if (rt.is_reg()) {
564 subu(rd, rs, rt.rm());
565 } else {
566 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
567 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
568 } else {
569 // li handles the relocation.
570 DCHECK(!rs.is(at));
571 li(at, rt);
572 subu(rd, rs, at);
573 }
574 }
575 }
576
577
578 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
579 if (rt.is_reg()) {
580 if (IsMipsArchVariant(kLoongson)) {
581 mult(rs, rt.rm());
582 mflo(rd);
583 } else {
584 mul(rd, rs, rt.rm());
585 }
586 } else {
587 // li handles the relocation.
588 DCHECK(!rs.is(at));
589 li(at, rt);
590 if (IsMipsArchVariant(kLoongson)) {
591 mult(rs, at);
592 mflo(rd);
593 } else {
594 mul(rd, rs, at);
595 }
596 }
597 }
598
599
600 void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
601 Register rs, const Operand& rt) {
602 if (rt.is_reg()) {
603 if (!IsMipsArchVariant(kMips32r6)) {
604 mult(rs, rt.rm());
605 mflo(rd_lo);
606 mfhi(rd_hi);
607 } else {
608 if (rd_lo.is(rs)) {
609 DCHECK(!rd_hi.is(rs));
610 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
611 muh(rd_hi, rs, rt.rm());
612 mul(rd_lo, rs, rt.rm());
613 } else {
614 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
615 mul(rd_lo, rs, rt.rm());
616 muh(rd_hi, rs, rt.rm());
617 }
618 }
619 } else {
620 // li handles the relocation.
621 DCHECK(!rs.is(at));
622 li(at, rt);
623 if (!IsMipsArchVariant(kMips32r6)) {
624 mult(rs, at);
625 mflo(rd_lo);
626 mfhi(rd_hi);
627 } else {
628 if (rd_lo.is(rs)) {
629 DCHECK(!rd_hi.is(rs));
630 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
631 muh(rd_hi, rs, at);
632 mul(rd_lo, rs, at);
633 } else {
634 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
635 mul(rd_lo, rs, at);
636 muh(rd_hi, rs, at);
637 }
638 }
639 }
640 }
641
642 void MacroAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
643 const Operand& rt) {
644 Register reg;
645 if (rt.is_reg()) {
646 reg = rt.rm();
647 } else {
648 DCHECK(!rs.is(at));
649 reg = at;
650 li(reg, rt);
651 }
652
653 if (!IsMipsArchVariant(kMips32r6)) {
654 multu(rs, reg);
655 mflo(rd_lo);
656 mfhi(rd_hi);
657 } else {
658 if (rd_lo.is(rs)) {
659 DCHECK(!rd_hi.is(rs));
660 DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
661 muhu(rd_hi, rs, reg);
662 mulu(rd_lo, rs, reg);
663 } else {
664 DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
665 mulu(rd_lo, rs, reg);
666 muhu(rd_hi, rs, reg);
667 }
668 }
669 }
670
671 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
672 if (rt.is_reg()) {
673 if (!IsMipsArchVariant(kMips32r6)) {
674 mult(rs, rt.rm());
675 mfhi(rd);
676 } else {
677 muh(rd, rs, rt.rm());
678 }
679 } else {
680 // li handles the relocation.
681 DCHECK(!rs.is(at));
682 li(at, rt);
683 if (!IsMipsArchVariant(kMips32r6)) {
684 mult(rs, at);
685 mfhi(rd);
686 } else {
687 muh(rd, rs, at);
688 }
689 }
690 }
691
692
693 void MacroAssembler::Mult(Register rs, const Operand& rt) {
694 if (rt.is_reg()) {
695 mult(rs, rt.rm());
696 } else {
697 // li handles the relocation.
698 DCHECK(!rs.is(at));
699 li(at, rt);
700 mult(rs, at);
701 }
702 }
703
704
705 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
706 if (rt.is_reg()) {
707 if (!IsMipsArchVariant(kMips32r6)) {
708 multu(rs, rt.rm());
709 mfhi(rd);
710 } else {
711 muhu(rd, rs, rt.rm());
712 }
713 } else {
714 // li handles the relocation.
715 DCHECK(!rs.is(at));
716 li(at, rt);
717 if (!IsMipsArchVariant(kMips32r6)) {
718 multu(rs, at);
719 mfhi(rd);
720 } else {
721 muhu(rd, rs, at);
722 }
723 }
724 }
725
726
727 void MacroAssembler::Multu(Register rs, const Operand& rt) {
728 if (rt.is_reg()) {
729 multu(rs, rt.rm());
730 } else {
731 // li handles the relocation.
732 DCHECK(!rs.is(at));
733 li(at, rt);
734 multu(rs, at);
735 }
736 }
737
738
739 void MacroAssembler::Div(Register rs, const Operand& rt) {
740 if (rt.is_reg()) {
741 div(rs, rt.rm());
742 } else {
743 // li handles the relocation.
744 DCHECK(!rs.is(at));
745 li(at, rt);
746 div(rs, at);
747 }
748 }
749
750
751 void MacroAssembler::Div(Register rem, Register res,
752 Register rs, const Operand& rt) {
753 if (rt.is_reg()) {
754 if (!IsMipsArchVariant(kMips32r6)) {
755 div(rs, rt.rm());
756 mflo(res);
757 mfhi(rem);
758 } else {
759 div(res, rs, rt.rm());
760 mod(rem, rs, rt.rm());
761 }
762 } else {
763 // li handles the relocation.
764 DCHECK(!rs.is(at));
765 li(at, rt);
766 if (!IsMipsArchVariant(kMips32r6)) {
767 div(rs, at);
768 mflo(res);
769 mfhi(rem);
770 } else {
771 div(res, rs, at);
772 mod(rem, rs, at);
773 }
774 }
775 }
776
777
778 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
779 if (rt.is_reg()) {
780 if (!IsMipsArchVariant(kMips32r6)) {
781 div(rs, rt.rm());
782 mflo(res);
783 } else {
784 div(res, rs, rt.rm());
785 }
786 } else {
787 // li handles the relocation.
788 DCHECK(!rs.is(at));
789 li(at, rt);
790 if (!IsMipsArchVariant(kMips32r6)) {
791 div(rs, at);
792 mflo(res);
793 } else {
794 div(res, rs, at);
795 }
796 }
797 }
798
799
800 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
801 if (rt.is_reg()) {
802 if (!IsMipsArchVariant(kMips32r6)) {
803 div(rs, rt.rm());
804 mfhi(rd);
805 } else {
806 mod(rd, rs, rt.rm());
807 }
808 } else {
809 // li handles the relocation.
810 DCHECK(!rs.is(at));
811 li(at, rt);
812 if (!IsMipsArchVariant(kMips32r6)) {
813 div(rs, at);
814 mfhi(rd);
815 } else {
816 mod(rd, rs, at);
817 }
818 }
819 }
820
821
822 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
823 if (rt.is_reg()) {
824 if (!IsMipsArchVariant(kMips32r6)) {
825 divu(rs, rt.rm());
826 mfhi(rd);
827 } else {
828 modu(rd, rs, rt.rm());
829 }
830 } else {
831 // li handles the relocation.
832 DCHECK(!rs.is(at));
833 li(at, rt);
834 if (!IsMipsArchVariant(kMips32r6)) {
835 divu(rs, at);
836 mfhi(rd);
837 } else {
838 modu(rd, rs, at);
839 }
840 }
841 }
842
843
844 void MacroAssembler::Divu(Register rs, const Operand& rt) {
845 if (rt.is_reg()) {
846 divu(rs, rt.rm());
847 } else {
848 // li handles the relocation.
849 DCHECK(!rs.is(at));
850 li(at, rt);
851 divu(rs, at);
852 }
853 }
854
855
856 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
857 if (rt.is_reg()) {
858 if (!IsMipsArchVariant(kMips32r6)) {
859 divu(rs, rt.rm());
860 mflo(res);
861 } else {
862 divu(res, rs, rt.rm());
863 }
864 } else {
865 // li handles the relocation.
866 DCHECK(!rs.is(at));
867 li(at, rt);
868 if (!IsMipsArchVariant(kMips32r6)) {
869 divu(rs, at);
870 mflo(res);
871 } else {
872 divu(res, rs, at);
873 }
874 }
875 }
876
877
878 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
879 if (rt.is_reg()) {
880 and_(rd, rs, rt.rm());
881 } else {
882 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
883 andi(rd, rs, rt.imm32_);
884 } else {
885 // li handles the relocation.
886 DCHECK(!rs.is(at));
887 li(at, rt);
888 and_(rd, rs, at);
889 }
890 }
891 }
892
893
894 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
895 if (rt.is_reg()) {
896 or_(rd, rs, rt.rm());
897 } else {
898 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
899 ori(rd, rs, rt.imm32_);
900 } else {
901 // li handles the relocation.
902 DCHECK(!rs.is(at));
903 li(at, rt);
904 or_(rd, rs, at);
905 }
906 }
907 }
908
909
910 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
911 if (rt.is_reg()) {
912 xor_(rd, rs, rt.rm());
913 } else {
914 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
915 xori(rd, rs, rt.imm32_);
916 } else {
917 // li handles the relocation.
918 DCHECK(!rs.is(at));
919 li(at, rt);
920 xor_(rd, rs, at);
921 }
922 }
923 }
924
925
926 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
927 if (rt.is_reg()) {
928 nor(rd, rs, rt.rm());
929 } else {
930 // li handles the relocation.
931 DCHECK(!rs.is(at));
932 li(at, rt);
933 nor(rd, rs, at);
934 }
935 }
936
937
938 void MacroAssembler::Neg(Register rs, const Operand& rt) {
939 DCHECK(rt.is_reg());
940 DCHECK(!at.is(rs));
941 DCHECK(!at.is(rt.rm()));
942 li(at, -1);
943 xor_(rs, rt.rm(), at);
944 }
945
946
947 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
948 if (rt.is_reg()) {
949 slt(rd, rs, rt.rm());
950 } else {
951 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
952 slti(rd, rs, rt.imm32_);
953 } else {
954 // li handles the relocation.
955 DCHECK(!rs.is(at));
956 li(at, rt);
957 slt(rd, rs, at);
958 }
959 }
960 }
961
962
963 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
964 if (rt.is_reg()) {
965 sltu(rd, rs, rt.rm());
966 } else {
967 const uint32_t int16_min = std::numeric_limits<int16_t>::min();
968 if (is_uint15(rt.imm32_) && !MustUseReg(rt.rmode_)) {
969 // Imm range is: [0, 32767].
970 sltiu(rd, rs, rt.imm32_);
971 } else if (is_uint15(rt.imm32_ - int16_min) && !MustUseReg(rt.rmode_)) {
972 // Imm range is: [max_unsigned-32767,max_unsigned].
973 sltiu(rd, rs, static_cast<uint16_t>(rt.imm32_));
974 } else {
975 // li handles the relocation.
976 DCHECK(!rs.is(at));
977 li(at, rt);
978 sltu(rd, rs, at);
979 }
980 }
981 }
982
983
984 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
985 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
986 if (rt.is_reg()) {
987 rotrv(rd, rs, rt.rm());
988 } else {
989 rotr(rd, rs, rt.imm32_ & 0x1f);
990 }
991 } else {
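// No rotate instructions before r2: emulate a rotate right by combining a
// logical right shift with the complementary left shift.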
992 if (rt.is_reg()) {
993 subu(at, zero_reg, rt.rm());
994 sllv(at, rs, at);
995 srlv(rd, rs, rt.rm());
996 or_(rd, rd, at);
997 } else {
998 if (rt.imm32_ == 0) {
999 srl(rd, rs, 0);
1000 } else {
1001 srl(at, rs, rt.imm32_ & 0x1f);
1002 sll(rd, rs, (0x20 - (rt.imm32_ & 0x1f)) & 0x1f);
1003 or_(rd, rd, at);
1004 }
1005 }
1006 }
1007 }
1008
1009
1010 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1011 if (IsMipsArchVariant(kLoongson)) {
1012 lw(zero_reg, rs);
1013 } else {
1014 pref(hint, rs);
1015 }
1016 }
1017
1018
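// Load-scale-add: rd = rt + (rs << sa), with 1 <= sa <= 31. Uses the single
// r6 lsa instruction when the shift amount allows it; otherwise falls back to
// sll + Addu, using 'scratch' only when rd aliases rt.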
1019 void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1020 Register scratch) {
1021 DCHECK(sa >= 1 && sa <= 31);
1022 if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
1023 lsa(rd, rt, rs, sa - 1);
1024 } else {
1025 Register tmp = rd.is(rt) ? scratch : rd;
1026 DCHECK(!tmp.is(rt));
1027 sll(tmp, rs, sa);
1028 Addu(rd, rt, tmp);
1029 }
1030 }
1031
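// Branch on overflow (Bovc) / no overflow (Bnvc) using the r6 compact
// branches. Once a trampoline pool has been emitted the target may be out of
// range for a compact branch, so branch around an unconditional long branch
// instead.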
1032 void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
1033 if (is_trampoline_emitted()) {
1034 Label skip;
1035 bnvc(rs, rt, &skip);
1036 BranchLong(L, PROTECT);
1037 bind(&skip);
1038 } else {
1039 bovc(rs, rt, L);
1040 }
1041 }
1042
1043 void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
1044 if (is_trampoline_emitted()) {
1045 Label skip;
1046 bovc(rs, rt, &skip);
1047 BranchLong(L, PROTECT);
1048 bind(&skip);
1049 } else {
1050 bnvc(rs, rt, L);
1051 }
1052 }
1053
1054 // ------------Pseudo-instructions-------------
1055
1056 // Word Swap Byte
1057 void MacroAssembler::ByteSwapSigned(Register dest, Register src,
1058 int operand_size) {
1059 DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
1060
1061 if (operand_size == 2) {
1062 Seh(src, src);
1063 } else if (operand_size == 1) {
1064 Seb(src, src);
1065 }
1066 // No need to do any preparation if operand_size is 4
1067
1068 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1069 wsbh(dest, src);
1070 rotr(dest, dest, 16);
1071 } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
1072 Register tmp = t0;
1073 Register tmp2 = t1;
1074
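// Assemble the byte-swapped word in tmp, moving one byte of src at a time;
// note that src is clobbered by the shifts below.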
1075 andi(tmp2, src, 0xFF);
1076 sll(tmp2, tmp2, 24);
1077 or_(tmp, zero_reg, tmp2);
1078
1079 andi(tmp2, src, 0xFF00);
1080 sll(tmp2, tmp2, 8);
1081 or_(tmp, tmp, tmp2);
1082
1083 srl(src, src, 8);
1084 andi(tmp2, src, 0xFF00);
1085 or_(tmp, tmp, tmp2);
1086
1087 srl(src, src, 16);
1088 andi(tmp2, src, 0xFF);
1089 or_(tmp, tmp, tmp2);
1090
1091 or_(dest, tmp, zero_reg);
1092 }
1093 }
1094
1095 void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
1096 int operand_size) {
1097 DCHECK(operand_size == 1 || operand_size == 2);
1098
1099 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1100 if (operand_size == 1) {
1101 andi(src, src, 0xFF);
1102 } else {
1103 andi(src, src, 0xFFFF);
1104 }
1105 // No need to do any preparation if operand_size is 4
1106
1107 wsbh(dest, src);
1108 rotr(dest, dest, 16);
1109 } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
1110 if (operand_size == 1) {
1111 sll(src, src, 24);
1112 } else {
1113 Register tmp = t0;
1114
1115 andi(tmp, src, 0xFF00);
1116 sll(src, src, 24);
1117 sll(tmp, tmp, 8);
1118 or_(dest, tmp, src);
1119 }
1120 }
1121 }
1122
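// Unaligned memory access helpers. On r6 the ordinary load/store instructions
// handle unaligned addresses; on r1/r2/Loongson the accesses are emulated
// with lwl/lwr, swl/swr or individual byte loads and stores.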
1123 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1124 DCHECK(!rd.is(at));
1125 DCHECK(!rs.rm().is(at));
1126 if (IsMipsArchVariant(kMips32r6)) {
1127 lw(rd, rs);
1128 } else {
1129 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1130 IsMipsArchVariant(kLoongson));
1131 if (is_int16(rs.offset() + kMipsLwrOffset) &&
1132 is_int16(rs.offset() + kMipsLwlOffset)) {
1133 if (!rd.is(rs.rm())) {
1134 lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1135 lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1136 } else {
1137 lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1138 lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1139 mov(rd, at);
1140 }
1141 } else { // Offset > 16 bits, use multiple instructions to load.
1142 LoadRegPlusOffsetToAt(rs);
1143 lwr(rd, MemOperand(at, kMipsLwrOffset));
1144 lwl(rd, MemOperand(at, kMipsLwlOffset));
1145 }
1146 }
1147 }
1148
1149
1150 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1151 DCHECK(!rd.is(at));
1152 DCHECK(!rs.rm().is(at));
1153 if (IsMipsArchVariant(kMips32r6)) {
1154 sw(rd, rs);
1155 } else {
1156 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1157 IsMipsArchVariant(kLoongson));
1158 if (is_int16(rs.offset() + kMipsSwrOffset) &&
1159 is_int16(rs.offset() + kMipsSwlOffset)) {
1160 swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
1161 swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
1162 } else {
1163 LoadRegPlusOffsetToAt(rs);
1164 swr(rd, MemOperand(at, kMipsSwrOffset));
1165 swl(rd, MemOperand(at, kMipsSwlOffset));
1166 }
1167 }
1168 }
1169
1170 void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
1171 DCHECK(!rd.is(at));
1172 DCHECK(!rs.rm().is(at));
1173 if (IsMipsArchVariant(kMips32r6)) {
1174 lh(rd, rs);
1175 } else {
1176 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1177 IsMipsArchVariant(kLoongson));
1178 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1179 #if defined(V8_TARGET_LITTLE_ENDIAN)
1180 lbu(at, rs);
1181 lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
1182 #elif defined(V8_TARGET_BIG_ENDIAN)
1183 lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1184 lb(rd, rs);
1185 #endif
1186 } else { // Offset > 16 bits, use multiple instructions to load.
1187 LoadRegPlusOffsetToAt(rs);
1188 #if defined(V8_TARGET_LITTLE_ENDIAN)
1189 lb(rd, MemOperand(at, 1));
1190 lbu(at, MemOperand(at, 0));
1191 #elif defined(V8_TARGET_BIG_ENDIAN)
1192 lb(rd, MemOperand(at, 0));
1193 lbu(at, MemOperand(at, 1));
1194 #endif
1195 }
1196 sll(rd, rd, 8);
1197 or_(rd, rd, at);
1198 }
1199 }
1200
1201 void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
1202 DCHECK(!rd.is(at));
1203 DCHECK(!rs.rm().is(at));
1204 if (IsMipsArchVariant(kMips32r6)) {
1205 lhu(rd, rs);
1206 } else {
1207 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1208 IsMipsArchVariant(kLoongson));
1209 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1210 #if defined(V8_TARGET_LITTLE_ENDIAN)
1211 lbu(at, rs);
1212 lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
1213 #elif defined(V8_TARGET_BIG_ENDIAN)
1214 lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1215 lbu(rd, rs);
1216 #endif
1217 } else { // Offset > 16 bits, use multiple instructions to load.
1218 LoadRegPlusOffsetToAt(rs);
1219 #if defined(V8_TARGET_LITTLE_ENDIAN)
1220 lbu(rd, MemOperand(at, 1));
1221 lbu(at, MemOperand(at, 0));
1222 #elif defined(V8_TARGET_BIG_ENDIAN)
1223 lbu(rd, MemOperand(at, 0));
1224 lbu(at, MemOperand(at, 1));
1225 #endif
1226 }
1227 sll(rd, rd, 8);
1228 or_(rd, rd, at);
1229 }
1230 }
1231
1232 void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
1233 DCHECK(!rd.is(at));
1234 DCHECK(!rs.rm().is(at));
1235 DCHECK(!rs.rm().is(scratch));
1236 DCHECK(!scratch.is(at));
1237 if (IsMipsArchVariant(kMips32r6)) {
1238 sh(rd, rs);
1239 } else {
1240 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1241 IsMipsArchVariant(kLoongson));
1242 MemOperand source = rs;
1243 // If offset > 16 bits, load address to at with offset 0.
1244 if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
1245 LoadRegPlusOffsetToAt(rs);
1246 source = MemOperand(at, 0);
1247 }
1248
1249 if (!scratch.is(rd)) {
1250 mov(scratch, rd);
1251 }
1252
1253 #if defined(V8_TARGET_LITTLE_ENDIAN)
1254 sb(scratch, source);
1255 srl(scratch, scratch, 8);
1256 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1257 #elif defined(V8_TARGET_BIG_ENDIAN)
1258 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1259 srl(scratch, scratch, 8);
1260 sb(scratch, source);
1261 #endif
1262 }
1263 }
1264
1265 void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
1266 Register scratch) {
1267 if (IsMipsArchVariant(kMips32r6)) {
1268 lwc1(fd, rs);
1269 } else {
1270 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1271 IsMipsArchVariant(kLoongson));
1272 Ulw(scratch, rs);
1273 mtc1(scratch, fd);
1274 }
1275 }
1276
1277 void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
1278 Register scratch) {
1279 if (IsMipsArchVariant(kMips32r6)) {
1280 swc1(fd, rs);
1281 } else {
1282 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1283 IsMipsArchVariant(kLoongson));
1284 mfc1(scratch, fd);
1285 Usw(scratch, rs);
1286 }
1287 }
1288
1289 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
1290 Register scratch) {
1291 DCHECK(!scratch.is(at));
1292 if (IsMipsArchVariant(kMips32r6)) {
1293 ldc1(fd, rs);
1294 } else {
1295 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1296 IsMipsArchVariant(kLoongson));
1297 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
1298 mtc1(scratch, fd);
1299 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
1300 Mthc1(scratch, fd);
1301 }
1302 }
1303
1304 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
1305 Register scratch) {
1306 DCHECK(!scratch.is(at));
1307 if (IsMipsArchVariant(kMips32r6)) {
1308 sdc1(fd, rs);
1309 } else {
1310 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1311 IsMipsArchVariant(kLoongson));
1312 mfc1(scratch, fd);
1313 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
1314 Mfhc1(scratch, fd);
1315 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
1316 }
1317 }
1318
1319
1320 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1321 li(dst, Operand(value), mode);
1322 }
1323
1324
1325 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1326 DCHECK(!j.is_reg());
1327 BlockTrampolinePoolScope block_trampoline_pool(this);
1328 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1329 // Normal load of an immediate value which does not need Relocation Info.
1330 if (is_int16(j.imm32_)) {
1331 addiu(rd, zero_reg, j.imm32_);
1332 } else if (!(j.imm32_ & kHiMask)) {
1333 ori(rd, zero_reg, j.imm32_);
1334 } else if (!(j.imm32_ & kImm16Mask)) {
1335 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1336 } else {
1337 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1338 ori(rd, rd, (j.imm32_ & kImm16Mask));
1339 }
1340 } else {
1341 if (MustUseReg(j.rmode_)) {
1342 RecordRelocInfo(j.rmode_, j.imm32_);
1343 }
1344 // We always need the same number of instructions as we may need to patch
1345 // this code to load another value which may need 2 instructions to load.
1346 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1347 ori(rd, rd, (j.imm32_ & kImm16Mask));
1348 }
1349 }
1350
1351
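// Push/pop helpers for register lists. MultiPush stores the highest-numbered
// register at the highest address, so the lowest-numbered register ends up
// closest to sp; the Reversed variants push in the opposite order.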
1352 void MacroAssembler::MultiPush(RegList regs) {
1353 int16_t num_to_push = NumberOfBitsSet(regs);
1354 int16_t stack_offset = num_to_push * kPointerSize;
1355
1356 Subu(sp, sp, Operand(stack_offset));
1357 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1358 if ((regs & (1 << i)) != 0) {
1359 stack_offset -= kPointerSize;
1360 sw(ToRegister(i), MemOperand(sp, stack_offset));
1361 }
1362 }
1363 }
1364
1365
1366 void MacroAssembler::MultiPushReversed(RegList regs) {
1367 int16_t num_to_push = NumberOfBitsSet(regs);
1368 int16_t stack_offset = num_to_push * kPointerSize;
1369
1370 Subu(sp, sp, Operand(stack_offset));
1371 for (int16_t i = 0; i < kNumRegisters; i++) {
1372 if ((regs & (1 << i)) != 0) {
1373 stack_offset -= kPointerSize;
1374 sw(ToRegister(i), MemOperand(sp, stack_offset));
1375 }
1376 }
1377 }
1378
1379
1380 void MacroAssembler::MultiPop(RegList regs) {
1381 int16_t stack_offset = 0;
1382
1383 for (int16_t i = 0; i < kNumRegisters; i++) {
1384 if ((regs & (1 << i)) != 0) {
1385 lw(ToRegister(i), MemOperand(sp, stack_offset));
1386 stack_offset += kPointerSize;
1387 }
1388 }
1389 addiu(sp, sp, stack_offset);
1390 }
1391
1392
1393 void MacroAssembler::MultiPopReversed(RegList regs) {
1394 int16_t stack_offset = 0;
1395
1396 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1397 if ((regs & (1 << i)) != 0) {
1398 lw(ToRegister(i), MemOperand(sp, stack_offset));
1399 stack_offset += kPointerSize;
1400 }
1401 }
1402 addiu(sp, sp, stack_offset);
1403 }
1404
1405
1406 void MacroAssembler::MultiPushFPU(RegList regs) {
1407 int16_t num_to_push = NumberOfBitsSet(regs);
1408 int16_t stack_offset = num_to_push * kDoubleSize;
1409
1410 Subu(sp, sp, Operand(stack_offset));
1411 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1412 if ((regs & (1 << i)) != 0) {
1413 stack_offset -= kDoubleSize;
1414 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1415 }
1416 }
1417 }
1418
1419
1420 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1421 int16_t num_to_push = NumberOfBitsSet(regs);
1422 int16_t stack_offset = num_to_push * kDoubleSize;
1423
1424 Subu(sp, sp, Operand(stack_offset));
1425 for (int16_t i = 0; i < kNumRegisters; i++) {
1426 if ((regs & (1 << i)) != 0) {
1427 stack_offset -= kDoubleSize;
1428 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1429 }
1430 }
1431 }
1432
1433
1434 void MacroAssembler::MultiPopFPU(RegList regs) {
1435 int16_t stack_offset = 0;
1436
1437 for (int16_t i = 0; i < kNumRegisters; i++) {
1438 if ((regs & (1 << i)) != 0) {
1439 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1440 stack_offset += kDoubleSize;
1441 }
1442 }
1443 addiu(sp, sp, stack_offset);
1444 }
1445
1446
1447 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1448 int16_t stack_offset = 0;
1449
1450 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1451 if ((regs & (1 << i)) != 0) {
1452 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1453 stack_offset += kDoubleSize;
1454 }
1455 }
1456 addiu(sp, sp, stack_offset);
1457 }
1458
1459 void MacroAssembler::AddPair(Register dst_low, Register dst_high,
1460 Register left_low, Register left_high,
1461 Register right_low, Register right_high) {
1462 Label no_overflow;
1463 Register kScratchReg = s3;
1464 Register kScratchReg2 = s4;
1465 // Add lower word
1466 Addu(dst_low, left_low, right_low);
1467 Addu(dst_high, left_high, right_high);
1468 // Check for lower word unsigned overflow
1469 Sltu(kScratchReg, dst_low, left_low);
1470 Sltu(kScratchReg2, dst_low, right_low);
1471 Or(kScratchReg, kScratchReg2, kScratchReg);
1472 Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
1473 // Increment higher word if there was overflow
1474 Addu(dst_high, dst_high, 0x1);
1475 bind(&no_overflow);
1476 }
1477
1478 void MacroAssembler::SubPair(Register dst_low, Register dst_high,
1479 Register left_low, Register left_high,
1480 Register right_low, Register right_high) {
1481 Label no_overflow;
1482 Register kScratchReg = s3;
1483 // Subtract lower word
1484 Subu(dst_low, left_low, right_low);
1485 Subu(dst_high, left_high, right_high);
1486 // Check for lower word unsigned underflow
1487 Sltu(kScratchReg, left_low, right_low);
1488 Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
1489 // Decrement higher word if there was underflow
1490 Subu(dst_high, dst_high, 0x1);
1491 bind(&no_overflow);
1492 }
1493
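// Shift the 64-bit pair {src_high:src_low} left by 'shift' (taken modulo 64)
// and write the result to {dst_high:dst_low}.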
1494 void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
1495 Register src_low, Register src_high,
1496 Register shift) {
1497 Label less_than_32;
1498 Label zero_shift;
1499 Label word_shift;
1500 Label done;
1501 Register kScratchReg = s3;
1502 And(shift, shift, 0x3F);
1503 li(kScratchReg, 0x20);
1504 Branch(&less_than_32, lt, shift, Operand(kScratchReg));
1505
1506 Branch(&word_shift, eq, shift, Operand(kScratchReg));
1507 // Shift more than 32
1508 Subu(kScratchReg, shift, kScratchReg);
1509 mov(dst_low, zero_reg);
1510 sllv(dst_high, src_low, kScratchReg);
1511 Branch(&done);
1512 // Word shift
1513 bind(&word_shift);
1514 mov(dst_low, zero_reg);
1515 mov(dst_high, src_low);
1516 Branch(&done);
1517
1518 bind(&less_than_32);
1519 // Check if zero shift
1520 Branch(&zero_shift, eq, shift, Operand(zero_reg));
1521 // Shift less than 32
1522 Subu(kScratchReg, kScratchReg, shift);
1523 sllv(dst_high, src_high, shift);
1524 sllv(dst_low, src_low, shift);
1525 srlv(kScratchReg, src_low, kScratchReg);
1526 Or(dst_high, dst_high, kScratchReg);
1527 Branch(&done);
1528 // Zero shift
1529 bind(&zero_shift);
1530 mov(dst_low, src_low);
1531 mov(dst_high, src_high);
1532 bind(&done);
1533 }
1534
1535 void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
1536 Register src_low, Register src_high,
1537 uint32_t shift) {
1538 Register kScratchReg = s3;
1539 shift = shift & 0x3F;
1540 if (shift < 32) {
1541 if (shift == 0) {
1542 mov(dst_low, src_low);
1543 mov(dst_high, src_high);
1544 } else {
1545 sll(dst_high, src_high, shift);
1546 sll(dst_low, src_low, shift);
1547 shift = 32 - shift;
1548 srl(kScratchReg, src_low, shift);
1549 Or(dst_high, dst_high, kScratchReg);
1550 }
1551 } else {
1552 if (shift == 32) {
1553 mov(dst_low, zero_reg);
1554 mov(dst_high, src_low);
1555 } else {
1556 shift = shift - 32;
1557 mov(dst_low, zero_reg);
1558 sll(dst_high, src_low, shift);
1559 }
1560 }
1561 }
1562
1563 void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
1564 Register src_low, Register src_high,
1565 Register shift) {
1566 Label less_than_32;
1567 Label zero_shift;
1568 Label word_shift;
1569 Label done;
1570 Register kScratchReg = s3;
1571 And(shift, shift, 0x3F);
1572 li(kScratchReg, 0x20);
1573 Branch(&less_than_32, lt, shift, Operand(kScratchReg));
1574
1575 Branch(&word_shift, eq, shift, Operand(kScratchReg));
1576 // Shift more than 32
1577 Subu(kScratchReg, shift, kScratchReg);
1578 mov(dst_high, zero_reg);
1579 srlv(dst_low, src_high, kScratchReg);
1580 Branch(&done);
1581 // Word shift
1582 bind(&word_shift);
1583 mov(dst_high, zero_reg);
1584 mov(dst_low, src_high);
1585 Branch(&done);
1586
1587 bind(&less_than_32);
1588 // Check if zero shift
1589 Branch(&zero_shift, eq, shift, Operand(zero_reg));
1590 // Shift less than 32
1591 Subu(kScratchReg, kScratchReg, shift);
1592 srlv(dst_high, src_high, shift);
1593 srlv(dst_low, src_low, shift);
1594 sllv(kScratchReg, src_high, kScratchReg);
1595 Or(dst_low, dst_low, kScratchReg);
1596 Branch(&done);
1597 // Zero shift
1598 bind(&zero_shift);
1599 mov(dst_low, src_low);
1600 mov(dst_high, src_high);
1601 bind(&done);
1602 }
1603
1604 void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
1605 Register src_low, Register src_high,
1606 uint32_t shift) {
1607 Register kScratchReg = s3;
1608 shift = shift & 0x3F;
1609 if (shift < 32) {
1610 if (shift == 0) {
1611 mov(dst_low, src_low);
1612 mov(dst_high, src_high);
1613 } else {
1614 srl(dst_high, src_high, shift);
1615 srl(dst_low, src_low, shift);
1616 shift = 32 - shift;
1617 sll(kScratchReg, src_high, shift);
1618 Or(dst_low, dst_low, kScratchReg);
1619 }
1620 } else {
1621 if (shift == 32) {
1622 mov(dst_high, zero_reg);
1623 mov(dst_low, src_high);
1624 } else {
1625 shift = shift - 32;
1626 mov(dst_high, zero_reg);
1627 srl(dst_low, src_high, shift);
1628 }
1629 }
1630 }
1631
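// Arithmetic shift right of the 64-bit pair {src_high:src_low} by 'shift'
// (taken modulo 64); for shift amounts of 32 or more the high word is filled
// with copies of the sign bit.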
1632 void MacroAssembler::SarPair(Register dst_low, Register dst_high,
1633 Register src_low, Register src_high,
1634 Register shift) {
1635 Label less_than_32;
1636 Label zero_shift;
1637 Label word_shift;
1638 Label done;
1639 Register kScratchReg = s3;
1640 Register kScratchReg2 = s4;
1641 And(shift, shift, 0x3F);
1642 li(kScratchReg, 0x20);
1643 Branch(&less_than_32, lt, shift, Operand(kScratchReg));
1644
1645 Branch(&word_shift, eq, shift, Operand(kScratchReg));
1646
1647 // Shift more than 32
1648 li(kScratchReg2, 0x1F);
1649 Subu(kScratchReg, shift, kScratchReg);
1650 srav(dst_high, src_high, kScratchReg2);
1651 srav(dst_low, src_high, kScratchReg);
1652 Branch(&done);
1653 // Word shift
1654 bind(&word_shift);
1655 li(kScratchReg2, 0x1F);
1656 srav(dst_high, src_high, kScratchReg2);
1657 mov(dst_low, src_high);
1658 Branch(&done);
1659
1660 bind(&less_than_32);
1661 // Check if zero shift
1662 Branch(&zero_shift, eq, shift, Operand(zero_reg));
1663
1664 // Shift less than 32
1665 Subu(kScratchReg, kScratchReg, shift);
1666 srav(dst_high, src_high, shift);
1667 srlv(dst_low, src_low, shift);
1668 sllv(kScratchReg, src_high, kScratchReg);
1669 Or(dst_low, dst_low, kScratchReg);
1670 Branch(&done);
1671 // Zero shift
1672 bind(&zero_shift);
1673 mov(dst_low, src_low);
1674 mov(dst_high, src_high);
1675 bind(&done);
1676 }
1677
1678 void MacroAssembler::SarPair(Register dst_low, Register dst_high,
1679 Register src_low, Register src_high,
1680 uint32_t shift) {
1681 Register kScratchReg = s3;
1682 shift = shift & 0x3F;
1683 if (shift < 32) {
1684 if (shift == 0) {
1685 mov(dst_low, src_low);
1686 mov(dst_high, src_high);
1687 } else {
1688 sra(dst_high, src_high, shift);
1689 srl(dst_low, src_low, shift);
1690 shift = 32 - shift;
1691 sll(kScratchReg, src_high, shift);
1692 Or(dst_low, dst_low, kScratchReg);
1693 }
1694 } else {
1695 if (shift == 32) {
1696 sra(dst_high, src_high, 31);
1697 mov(dst_low, src_high);
1698 } else {
1699 shift = shift - 32;
1700 sra(dst_high, src_high, 31);
1701 sra(dst_low, src_high, shift);
1702 }
1703 }
1704 }
1705
1706 void MacroAssembler::Ext(Register rt,
1707 Register rs,
1708 uint16_t pos,
1709 uint16_t size) {
1710 DCHECK(pos < 32);
1711 DCHECK(pos + size < 33);
1712
1713 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1714 ext_(rt, rs, pos, size);
1715 } else {
1716 // Move rs to rt and shift it left then right to get the
1717 // desired bitfield on the right side and zeroes on the left.
1718 int shift_left = 32 - (pos + size);
1719 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1720
1721 int shift_right = 32 - size;
1722 if (shift_right > 0) {
1723 srl(rt, rt, shift_right);
1724 }
1725 }
1726 }
1727
1728
1729 void MacroAssembler::Ins(Register rt,
1730 Register rs,
1731 uint16_t pos,
1732 uint16_t size) {
1733 DCHECK(pos < 32);
1734 DCHECK(pos + size <= 32);
1735 DCHECK(size != 0);
1736
1737 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1738 ins_(rt, rs, pos, size);
1739 } else {
1740 DCHECK(!rt.is(t8) && !rs.is(t8));
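// No ins instruction: build a mask of 'size' ones in at, position the source
// bits and the mask at 'pos', then merge them into rt while keeping the bits
// of rt outside the field.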
1741 Subu(at, zero_reg, Operand(1));
1742 srl(at, at, 32 - size);
1743 and_(t8, rs, at);
1744 sll(t8, t8, pos);
1745 sll(at, at, pos);
1746 nor(at, at, zero_reg);
1747 and_(at, rt, at);
1748 or_(rt, t8, at);
1749 }
1750 }
1751
1752 void MacroAssembler::Seb(Register rd, Register rt) {
1753 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1754 seb(rd, rt);
1755 } else {
1756 DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
1757 sll(rd, rt, 24);
1758 sra(rd, rd, 24);
1759 }
1760 }
1761
1762 void MacroAssembler::Seh(Register rd, Register rt) {
1763 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1764 seh(rd, rt);
1765 } else {
1766 DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
1767 sll(rd, rt, 16);
1768 sra(rd, rd, 16);
1769 }
1770 }
1771
1772 void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
1773 if (IsMipsArchVariant(kMips32r6)) {
1774 // r6 neg_s changes the sign for NaN-like operands as well.
1775 neg_s(fd, fs);
1776 } else {
1777 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1778 IsMipsArchVariant(kLoongson));
1779 Label is_nan, done;
1780 Register scratch1 = t8;
1781 Register scratch2 = t9;
1782 BranchF32(nullptr, &is_nan, eq, fs, fs);
1783 Branch(USE_DELAY_SLOT, &done);
1784 // For NaN input, neg_s will return the same NaN value,
1785 // while the sign has to be changed separately.
1786 neg_s(fd, fs); // In delay slot.
1787 bind(&is_nan);
1788 mfc1(scratch1, fs);
1789 And(scratch2, scratch1, Operand(~kBinary32SignMask));
1790 And(scratch1, scratch1, Operand(kBinary32SignMask));
1791 Xor(scratch1, scratch1, Operand(kBinary32SignMask));
1792 Or(scratch2, scratch2, scratch1);
1793 mtc1(scratch2, fd);
1794 bind(&done);
1795 }
1796 }
1797
1798 void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
1799 if (IsMipsArchVariant(kMips32r6)) {
1800 // r6 neg_d changes the sign for NaN-like operands as well.
1801 neg_d(fd, fs);
1802 } else {
1803 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1804 IsMipsArchVariant(kLoongson));
1805 Label is_nan, done;
1806 Register scratch1 = t8;
1807 Register scratch2 = t9;
1808 BranchF64(nullptr, &is_nan, eq, fs, fs);
1809 Branch(USE_DELAY_SLOT, &done);
1810 // For NaN input, neg_d will return the same NaN value,
1811 // while the sign has to be changed separately.
1812 neg_d(fd, fs); // In delay slot.
1813 bind(&is_nan);
1814 Mfhc1(scratch1, fs);
1815 And(scratch2, scratch1, Operand(~HeapNumber::kSignMask));
1816 And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
1817 Xor(scratch1, scratch1, Operand(HeapNumber::kSignMask));
1818 Or(scratch2, scratch2, scratch1);
1819 Mthc1(scratch2, fd);
1820 bind(&done);
1821 }
1822 }
1823
1824 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
1825 FPURegister scratch) {
1826 // In FP64Mode we do the conversion from long.
1827 if (IsFp64Mode()) {
1828 mtc1(rs, scratch);
1829 Mthc1(zero_reg, scratch);
1830 cvt_d_l(fd, scratch);
1831 } else {
1832 // Convert rs to a FP value in fd.
1833 DCHECK(!fd.is(scratch));
1834 DCHECK(!rs.is(at));
1835
1836 Label msb_clear, conversion_done;
1837 // For a value which is < 2^31, regard it as a signed positive word.
1838 Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT);
1839 mtc1(rs, fd);
1840
1841 li(at, 0x41F00000); // FP value: 2^32.
1842
1843 // For unsigned inputs >= 2^31, we convert to double as a signed int32,
1844 // then add 2^32 to move the result back to the unsigned range 2^31..2^32-1.
1845 mtc1(zero_reg, scratch);
1846 Mthc1(at, scratch);
1847
1848 cvt_d_w(fd, fd);
1849
1850 Branch(USE_DELAY_SLOT, &conversion_done);
1851 add_d(fd, fd, scratch);
1852
1853 bind(&msb_clear);
1854 cvt_d_w(fd, fd);
1855
1856 bind(&conversion_done);
1857 }
1858 }
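// Illustrative note, not emitted code: the pre-FP64 path above corresponds to
// the following C++ computation, assuming IEEE-754 doubles:
//   double d = static_cast<double>(static_cast<int32_t>(u));  // signed convert
//   if (static_cast<int32_t>(u) < 0) d += 4294967296.0;       // add 2^32
// 0x41F00000 is the high word of the double constant 2^32.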
1859
1860
Trunc_uw_d(FPURegister fd,FPURegister fs,FPURegister scratch)1861 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1862 FPURegister fs,
1863 FPURegister scratch) {
1864 Trunc_uw_d(fs, t8, scratch);
1865 mtc1(t8, fd);
1866 }
1867
Trunc_uw_s(FPURegister fd,FPURegister fs,FPURegister scratch)1868 void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
1869 FPURegister scratch) {
1870 Trunc_uw_s(fs, t8, scratch);
1871 mtc1(t8, fd);
1872 }
1873
Trunc_w_d(FPURegister fd,FPURegister fs)1874 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1875 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1876 Mfhc1(t8, fs);
1877 trunc_w_d(fd, fs);
1878 Mthc1(t8, fs);
1879 } else {
1880 trunc_w_d(fd, fs);
1881 }
1882 }
1883
1884
Round_w_d(FPURegister fd,FPURegister fs)1885 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1886 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1887 Mfhc1(t8, fs);
1888 round_w_d(fd, fs);
1889 Mthc1(t8, fs);
1890 } else {
1891 round_w_d(fd, fs);
1892 }
1893 }
1894
1895
Floor_w_d(FPURegister fd,FPURegister fs)1896 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1897 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1898 Mfhc1(t8, fs);
1899 floor_w_d(fd, fs);
1900 Mthc1(t8, fs);
1901 } else {
1902 floor_w_d(fd, fs);
1903 }
1904 }
1905
1906
Ceil_w_d(FPURegister fd,FPURegister fs)1907 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1908 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1909 Mfhc1(t8, fs);
1910 ceil_w_d(fd, fs);
1911 Mthc1(t8, fs);
1912 } else {
1913 ceil_w_d(fd, fs);
1914 }
1915 }
1916
1917
Trunc_uw_d(FPURegister fd,Register rs,FPURegister scratch)1918 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1919 Register rs,
1920 FPURegister scratch) {
1921 DCHECK(!fd.is(scratch));
1922 DCHECK(!rs.is(at));
1923
1924 // Load 2^31 into scratch as its double representation.
1925 li(at, 0x41E00000);
1926 mtc1(zero_reg, scratch);
1927 Mthc1(at, scratch);
1928 // Test if scratch > fd.
1929 // If fd < 2^31 we can convert it normally.
1930 Label simple_convert;
1931 BranchF(&simple_convert, NULL, lt, fd, scratch);
1932
1933 // First we subtract 2^31 from fd, then trunc it to rs
1934 // and add 2^31 to rs.
1935 sub_d(scratch, fd, scratch);
1936 trunc_w_d(scratch, scratch);
1937 mfc1(rs, scratch);
1938 Or(rs, rs, 1 << 31);
1939
1940 Label done;
1941 Branch(&done);
1942 // Simple conversion.
1943 bind(&simple_convert);
1944 trunc_w_d(scratch, fd);
1945 mfc1(rs, scratch);
1946
1947 bind(&done);
1948 }
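// Illustrative note, not emitted code: the sequence above mirrors this C++
// logic, assuming the input double is in [0, 2^32):
//   uint32_t r;
//   if (d < 2147483648.0) {        // d < 2^31: plain signed truncation works
//     r = static_cast<uint32_t>(static_cast<int32_t>(d));
//   } else {                       // subtract 2^31, truncate, then set bit 31
//     r = static_cast<uint32_t>(static_cast<int32_t>(d - 2147483648.0)) |
//         0x80000000u;
//   }
// 0x41E00000 is the high word of the double constant 2^31.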
1949
Trunc_uw_s(FPURegister fd,Register rs,FPURegister scratch)1950 void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
1951 FPURegister scratch) {
1952 DCHECK(!fd.is(scratch));
1953 DCHECK(!rs.is(at));
1954
1955 // Load 2^31 into scratch as its float representation.
1956 li(at, 0x4F000000);
1957 mtc1(at, scratch);
1958 // Test if scratch > fd.
1959 // If fd < 2^31 we can convert it normally.
1960 Label simple_convert;
1961 BranchF32(&simple_convert, NULL, lt, fd, scratch);
1962
1963 // First we subtract 2^31 from fd, then trunc it to rs
1964 // and add 2^31 to rs.
1965 sub_s(scratch, fd, scratch);
1966 trunc_w_s(scratch, scratch);
1967 mfc1(rs, scratch);
1968 Or(rs, rs, 1 << 31);
1969
1970 Label done;
1971 Branch(&done);
1972 // Simple conversion.
1973 bind(&simple_convert);
1974 trunc_w_s(scratch, fd);
1975 mfc1(rs, scratch);
1976
1977 bind(&done);
1978 }
1979
Mthc1(Register rt,FPURegister fs)1980 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
1981 if (IsFp32Mode()) {
1982 mtc1(rt, fs.high());
1983 } else {
1984 DCHECK(IsFp64Mode() || IsFpxxMode());
1985 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1986 mthc1(rt, fs);
1987 }
1988 }
1989
1990
Mfhc1(Register rt,FPURegister fs)1991 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
1992 if (IsFp32Mode()) {
1993 mfc1(rt, fs.high());
1994 } else {
1995 DCHECK(IsFp64Mode() || IsFpxxMode());
1996 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1997 mfhc1(rt, fs);
1998 }
1999 }
2000
2001
BranchFCommon(SecondaryField sizeField,Label * target,Label * nan,Condition cond,FPURegister cmp1,FPURegister cmp2,BranchDelaySlot bd)2002 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
2003 Label* nan, Condition cond, FPURegister cmp1,
2004 FPURegister cmp2, BranchDelaySlot bd) {
2005 {
2006 BlockTrampolinePoolScope block_trampoline_pool(this);
2007 if (cond == al) {
2008 Branch(bd, target);
2009 return;
2010 }
2011
2012 if (IsMipsArchVariant(kMips32r6)) {
2013 sizeField = sizeField == D ? L : W;
2014 }
2015 DCHECK(nan || target);
2016 // Check for unordered (NaN) cases.
2017 if (nan) {
2018 bool long_branch =
2019 nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
2020 if (!IsMipsArchVariant(kMips32r6)) {
2021 if (long_branch) {
2022 Label skip;
2023 c(UN, sizeField, cmp1, cmp2);
2024 bc1f(&skip);
2025 nop();
2026 BranchLong(nan, bd);
2027 bind(&skip);
2028 } else {
2029 c(UN, sizeField, cmp1, cmp2);
2030 bc1t(nan);
2031 if (bd == PROTECT) {
2032 nop();
2033 }
2034 }
2035 } else {
2036 // Use kDoubleCompareReg for the comparison result. It has to be unavailable
2037 // to the lithium register allocator.
2038 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2039 if (long_branch) {
2040 Label skip;
2041 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2042 bc1eqz(&skip, kDoubleCompareReg);
2043 nop();
2044 BranchLong(nan, bd);
2045 bind(&skip);
2046 } else {
2047 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2048 bc1nez(nan, kDoubleCompareReg);
2049 if (bd == PROTECT) {
2050 nop();
2051 }
2052 }
2053 }
2054 }
2055
2056 if (target) {
2057 bool long_branch =
2058 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2059 if (long_branch) {
2060 Label skip;
2061 Condition neg_cond = NegateFpuCondition(cond);
2062 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
2063 BranchLong(target, bd);
2064 bind(&skip);
2065 } else {
2066 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
2067 }
2068 }
2069 }
2070 }
2071
BranchShortF(SecondaryField sizeField,Label * target,Condition cc,FPURegister cmp1,FPURegister cmp2,BranchDelaySlot bd)2072 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
2073 Condition cc, FPURegister cmp1,
2074 FPURegister cmp2, BranchDelaySlot bd) {
2075 if (!IsMipsArchVariant(kMips32r6)) {
2076 BlockTrampolinePoolScope block_trampoline_pool(this);
2077 if (target) {
2078 // Here NaN cases were either handled by this function or are assumed to
2079 // have been handled by the caller.
2080 switch (cc) {
2081 case lt:
2082 c(OLT, sizeField, cmp1, cmp2);
2083 bc1t(target);
2084 break;
2085 case ult:
2086 c(ULT, sizeField, cmp1, cmp2);
2087 bc1t(target);
2088 break;
2089 case gt:
2090 c(ULE, sizeField, cmp1, cmp2);
2091 bc1f(target);
2092 break;
2093 case ugt:
2094 c(OLE, sizeField, cmp1, cmp2);
2095 bc1f(target);
2096 break;
2097 case ge:
2098 c(ULT, sizeField, cmp1, cmp2);
2099 bc1f(target);
2100 break;
2101 case uge:
2102 c(OLT, sizeField, cmp1, cmp2);
2103 bc1f(target);
2104 break;
2105 case le:
2106 c(OLE, sizeField, cmp1, cmp2);
2107 bc1t(target);
2108 break;
2109 case ule:
2110 c(ULE, sizeField, cmp1, cmp2);
2111 bc1t(target);
2112 break;
2113 case eq:
2114 c(EQ, sizeField, cmp1, cmp2);
2115 bc1t(target);
2116 break;
2117 case ueq:
2118 c(UEQ, sizeField, cmp1, cmp2);
2119 bc1t(target);
2120 break;
2121 case ne: // Unordered or not equal.
2122 c(EQ, sizeField, cmp1, cmp2);
2123 bc1f(target);
2124 break;
2125 case ogl:
2126 c(UEQ, sizeField, cmp1, cmp2);
2127 bc1f(target);
2128 break;
2129 default:
2130 CHECK(0);
2131 }
2132 }
2133 } else {
2134 BlockTrampolinePoolScope block_trampoline_pool(this);
2135 if (target) {
2136 // Here NaN cases were either handled by this function or are assumed to
2137 // have been handled by the caller.
2138 // Unsigned conditions are treated as their signed counterparts.
2139 // Use kDoubleCompareReg for the comparison result; it is
2140 // valid in fp64 (FR = 1) mode, which is implied for mips32r6.
2141 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2142 switch (cc) {
2143 case lt:
2144 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2145 bc1nez(target, kDoubleCompareReg);
2146 break;
2147 case ult:
2148 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2149 bc1nez(target, kDoubleCompareReg);
2150 break;
2151 case gt:
2152 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2153 bc1eqz(target, kDoubleCompareReg);
2154 break;
2155 case ugt:
2156 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2157 bc1eqz(target, kDoubleCompareReg);
2158 break;
2159 case ge:
2160 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2161 bc1eqz(target, kDoubleCompareReg);
2162 break;
2163 case uge:
2164 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2165 bc1eqz(target, kDoubleCompareReg);
2166 break;
2167 case le:
2168 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2169 bc1nez(target, kDoubleCompareReg);
2170 break;
2171 case ule:
2172 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2173 bc1nez(target, kDoubleCompareReg);
2174 break;
2175 case eq:
2176 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2177 bc1nez(target, kDoubleCompareReg);
2178 break;
2179 case ueq:
2180 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2181 bc1nez(target, kDoubleCompareReg);
2182 break;
2183 case ne:
2184 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2185 bc1eqz(target, kDoubleCompareReg);
2186 break;
2187 case ogl:
2188 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2189 bc1eqz(target, kDoubleCompareReg);
2190 break;
2191 default:
2192 CHECK(0);
2193 }
2194 }
2195 }
2196 if (bd == PROTECT) {
2197 nop();
2198 }
2199 }
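// Illustrative note, not emitted code: the pre-r6 c() compare only provides
// equality, less-than and less-or-equal predicates (ordered or unordered), so
// the 'greater' conditions above are derived by negating an unordered compare.
// For example 'gt' is emitted as c(ULE, ...) followed by bc1f, which in C++
// terms is:
//   bool gt = !(std::isnan(a) || std::isnan(b) || a <= b);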
2200
2201
FmoveLow(FPURegister dst,Register src_low)2202 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2203 if (IsFp32Mode()) {
2204 mtc1(src_low, dst);
2205 } else {
2206 DCHECK(IsFp64Mode() || IsFpxxMode());
2207 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2208 DCHECK(!src_low.is(at));
2209 mfhc1(at, dst);
2210 mtc1(src_low, dst);
2211 mthc1(at, dst);
2212 }
2213 }
2214
2215
Move(FPURegister dst,float imm)2216 void MacroAssembler::Move(FPURegister dst, float imm) {
2217 li(at, Operand(bit_cast<int32_t>(imm)));
2218 mtc1(at, dst);
2219 }
2220
2221
Move(FPURegister dst,double imm)2222 void MacroAssembler::Move(FPURegister dst, double imm) {
2223 int64_t imm_bits = bit_cast<int64_t>(imm);
2224 // Handle special values first.
2225 if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
2226 mov_d(dst, kDoubleRegZero);
2227 } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
2228 Neg_d(dst, kDoubleRegZero);
2229 } else {
2230 uint32_t lo, hi;
2231 DoubleAsTwoUInt32(imm, &lo, &hi);
2232 // Move the low part of the double into the lower half of the corresponding
2233 // FPU register pair.
2234 if (lo != 0) {
2235 li(at, Operand(lo));
2236 mtc1(at, dst);
2237 } else {
2238 mtc1(zero_reg, dst);
2239 }
2240 // Move the high part of the double into the upper half of the corresponding
2241 // FPU register pair.
2242 if (hi != 0) {
2243 li(at, Operand(hi));
2244 Mthc1(at, dst);
2245 } else {
2246 Mthc1(zero_reg, dst);
2247 }
2248 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
2249 }
2250 }
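// Illustrative note, not emitted code: DoubleAsTwoUInt32 conceptually splits
// the IEEE-754 bit pattern of the immediate:
//   uint64_t bits = bit_cast<uint64_t>(imm);
//   uint32_t lo = static_cast<uint32_t>(bits);
//   uint32_t hi = static_cast<uint32_t>(bits >> 32);
// and the mtc1/Mthc1 pair above then places lo and hi into the low and high
// halves of the destination FPU register pair.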
2251
2252
Movz(Register rd,Register rs,Register rt)2253 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
2254 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
2255 Label done;
2256 Branch(&done, ne, rt, Operand(zero_reg));
2257 mov(rd, rs);
2258 bind(&done);
2259 } else {
2260 movz(rd, rs, rt);
2261 }
2262 }
2263
2264
Movn(Register rd,Register rs,Register rt)2265 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2266 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
2267 Label done;
2268 Branch(&done, eq, rt, Operand(zero_reg));
2269 mov(rd, rs);
2270 bind(&done);
2271 } else {
2272 movn(rd, rs, rt);
2273 }
2274 }
2275
2276
Movt(Register rd,Register rs,uint16_t cc)2277 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2278 if (IsMipsArchVariant(kLoongson)) {
2279 // Tests an FP condition code and then conditionally moves rs to rd.
2280 // We do not currently use any FPU cc bit other than bit 0.
2281 DCHECK(cc == 0);
2282 DCHECK(!(rs.is(t8) || rd.is(t8)));
2283 Label done;
2284 Register scratch = t8;
2285 // For testing purposes we need to fetch the contents of the FCSR register
2286 // and then test its cc (floating point condition code) bit (for cc = 0, this
2287 // is bit 23 of the FCSR).
2288 cfc1(scratch, FCSR);
2289 // For the MIPS I, II and III architectures, the contents of scratch are
2290 // UNPREDICTABLE for the instruction immediately following CFC1.
2291 nop();
2292 srl(scratch, scratch, 16);
2293 andi(scratch, scratch, 0x0080);
2294 Branch(&done, eq, scratch, Operand(zero_reg));
2295 mov(rd, rs);
2296 bind(&done);
2297 } else {
2298 movt(rd, rs, cc);
2299 }
2300 }
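// Illustrative note, not emitted code: the Loongson fallback above tests FCSR
// condition code 0 in software. With fcsr holding the control word, it is
// equivalent to:
//   bool cc0_set = ((fcsr >> 23) & 1) != 0;  // cc 0 lives in bit 23
//   if (cc0_set) rd = rs;                    // Movt moves when the bit is set
// The srl-by-16 plus andi-0x0080 pair above extracts exactly that bit.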
2301
2302
Movf(Register rd,Register rs,uint16_t cc)2303 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2304 if (IsMipsArchVariant(kLoongson)) {
2305 // Tests an FP condition code and then conditionally moves rs to rd.
2306 // We do not currently use any FPU cc bit other than bit 0.
2307 DCHECK(cc == 0);
2308 DCHECK(!(rs.is(t8) || rd.is(t8)));
2309 Label done;
2310 Register scratch = t8;
2311 // For testing purposes we need to fetch the contents of the FCSR register
2312 // and then test its cc (floating point condition code) bit (for cc = 0, this
2313 // is bit 23 of the FCSR).
2314 cfc1(scratch, FCSR);
2315 // For the MIPS I, II and III architectures, the contents of scratch are
2316 // UNPREDICTABLE for the instruction immediately following CFC1.
2317 nop();
2318 srl(scratch, scratch, 16);
2319 andi(scratch, scratch, 0x0080);
2320 Branch(&done, ne, scratch, Operand(zero_reg));
2321 mov(rd, rs);
2322 bind(&done);
2323 } else {
2324 movf(rd, rs, cc);
2325 }
2326 }
2327
2328 #define __ masm->
2329
ZeroHelper_d(MacroAssembler * masm,MaxMinKind kind,FPURegister dst,FPURegister src1,FPURegister src2,Label * equal)2330 static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2331 FPURegister src1, FPURegister src2, Label* equal) {
2332 if (src1.is(src2)) {
2333 __ Move(dst, src1);
2334 return true;
2335 }
2336
2337 Label other, compare_not_equal;
2338 FPURegister left, right;
2339 if (kind == MaxMinKind::kMin) {
2340 left = src1;
2341 right = src2;
2342 } else {
2343 left = src2;
2344 right = src1;
2345 }
2346
2347 __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
2348 // Left and right hand side are equal, check for -0 vs. +0.
2349 __ FmoveHigh(t8, src1);
2350 __ Branch(&other, eq, t8, Operand(0x80000000));
2351 __ Move_d(dst, right);
2352 __ Branch(equal);
2353 __ bind(&other);
2354 __ Move_d(dst, left);
2355 __ Branch(equal);
2356 __ bind(&compare_not_equal);
2357 return false;
2358 }
2359
ZeroHelper_s(MacroAssembler * masm,MaxMinKind kind,FPURegister dst,FPURegister src1,FPURegister src2,Label * equal)2360 static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2361 FPURegister src1, FPURegister src2, Label* equal) {
2362 if (src1.is(src2)) {
2363 __ Move(dst, src1);
2364 return true;
2365 }
2366
2367 Label other, compare_not_equal;
2368 FPURegister left, right;
2369 if (kind == MaxMinKind::kMin) {
2370 left = src1;
2371 right = src2;
2372 } else {
2373 left = src2;
2374 right = src1;
2375 }
2376
2377 __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
2378 // Left and right hand side are equal, check for -0 vs. +0.
2379 __ FmoveLow(t8, src1);
2380 __ Branch(&other, eq, t8, Operand(0x80000000));
2381 __ Move_s(dst, right);
2382 __ Branch(equal);
2383 __ bind(&other);
2384 __ Move_s(dst, left);
2385 __ Branch(equal);
2386 __ bind(&compare_not_equal);
2387 return false;
2388 }
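// Illustrative note, not emitted code: the two helpers above implement the
// +0/-0 tie-break for min/max when the operands compare equal. Because the
// values are already known to be equal, comparing the raw (high) word of src1
// against 0x80000000 distinguishes -0 from +0, so that conceptually:
//   min(+0.0, -0.0) == -0.0   and   max(+0.0, -0.0) == +0.0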
2389
2390 #undef __
2391
MinNaNCheck_d(FPURegister dst,FPURegister src1,FPURegister src2,Label * nan)2392 void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
2393 FPURegister src2, Label* nan) {
2394 if (nan) {
2395 BranchF64(nullptr, nan, eq, src1, src2);
2396 }
2397 if (IsMipsArchVariant(kMips32r6)) {
2398 min_d(dst, src1, src2);
2399 } else {
2400 Label skip;
2401 if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2402 if (dst.is(src1)) {
2403 BranchF64(&skip, nullptr, le, src1, src2);
2404 Move_d(dst, src2);
2405 } else if (dst.is(src2)) {
2406 BranchF64(&skip, nullptr, ge, src1, src2);
2407 Move_d(dst, src1);
2408 } else {
2409 Label right;
2410 BranchF64(&right, nullptr, gt, src1, src2);
2411 Move_d(dst, src1);
2412 Branch(&skip);
2413 bind(&right);
2414 Move_d(dst, src2);
2415 }
2416 }
2417 bind(&skip);
2418 }
2419 }
2420
MaxNaNCheck_d(FPURegister dst,FPURegister src1,FPURegister src2,Label * nan)2421 void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
2422 FPURegister src2, Label* nan) {
2423 if (nan) {
2424 BranchF64(nullptr, nan, eq, src1, src2);
2425 }
2426 if (IsMipsArchVariant(kMips32r6)) {
2427 max_d(dst, src1, src2);
2428 } else {
2429 Label skip;
2430 if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2431 if (dst.is(src1)) {
2432 BranchF64(&skip, nullptr, ge, src1, src2);
2433 Move_d(dst, src2);
2434 } else if (dst.is(src2)) {
2435 BranchF64(&skip, nullptr, le, src1, src2);
2436 Move_d(dst, src1);
2437 } else {
2438 Label right;
2439 BranchF64(&right, nullptr, lt, src1, src2);
2440 Move_d(dst, src1);
2441 Branch(&skip);
2442 bind(&right);
2443 Move_d(dst, src2);
2444 }
2445 }
2446 bind(&skip);
2447 }
2448 }
2449
MinNaNCheck_s(FPURegister dst,FPURegister src1,FPURegister src2,Label * nan)2450 void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
2451 FPURegister src2, Label* nan) {
2452 if (nan) {
2453 BranchF32(nullptr, nan, eq, src1, src2);
2454 }
2455 if (IsMipsArchVariant(kMips32r6)) {
2456 min_s(dst, src1, src2);
2457 } else {
2458 Label skip;
2459 if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2460 if (dst.is(src1)) {
2461 BranchF32(&skip, nullptr, le, src1, src2);
2462 Move_s(dst, src2);
2463 } else if (dst.is(src2)) {
2464 BranchF32(&skip, nullptr, ge, src1, src2);
2465 Move_s(dst, src1);
2466 } else {
2467 Label right;
2468 BranchF32(&right, nullptr, gt, src1, src2);
2469 Move_s(dst, src1);
2470 Branch(&skip);
2471 bind(&right);
2472 Move_s(dst, src2);
2473 }
2474 }
2475 bind(&skip);
2476 }
2477 }
2478
MaxNaNCheck_s(FPURegister dst,FPURegister src1,FPURegister src2,Label * nan)2479 void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
2480 FPURegister src2, Label* nan) {
2481 if (nan) {
2482 BranchF32(nullptr, nan, eq, src1, src2);
2483 }
2484 if (IsMipsArchVariant(kMips32r6)) {
2485 max_s(dst, src1, src2);
2486 } else {
2487 Label skip;
2488 if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2489 if (dst.is(src1)) {
2490 BranchF32(&skip, nullptr, ge, src1, src2);
2491 Move_s(dst, src2);
2492 } else if (dst.is(src2)) {
2493 BranchF32(&skip, nullptr, le, src1, src2);
2494 Move_s(dst, src1);
2495 } else {
2496 Label right;
2497 BranchF32(&right, nullptr, lt, src1, src2);
2498 Move_s(dst, src1);
2499 Branch(&skip);
2500 bind(&right);
2501 Move_s(dst, src2);
2502 }
2503 }
2504 bind(&skip);
2505 }
2506 }
2507
Clz(Register rd,Register rs)2508 void MacroAssembler::Clz(Register rd, Register rs) {
2509 if (IsMipsArchVariant(kLoongson)) {
2510 DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
2511 Register mask = t8;
2512 Register scratch = t9;
2513 Label loop, end;
2514 mov(at, rs);
2515 mov(rd, zero_reg);
2516 lui(mask, 0x8000);
2517 bind(&loop);
2518 and_(scratch, at, mask);
2519 Branch(&end, ne, scratch, Operand(zero_reg));
2520 addiu(rd, rd, 1);
2521 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
2522 srl(mask, mask, 1);
2523 bind(&end);
2524 } else {
2525 clz(rd, rs);
2526 }
2527 }
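// Illustrative note, not emitted code: the Loongson fallback above counts
// leading zeros by scanning a one-bit mask downwards from bit 31. For a
// non-zero input it matches this C++ sketch:
//   uint32_t clz = 0;
//   for (uint32_t mask = 0x80000000u; (x & mask) == 0; mask >>= 1) clz++;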
2528
2529
EmitFPUTruncate(FPURoundingMode rounding_mode,Register result,DoubleRegister double_input,Register scratch,DoubleRegister double_scratch,Register except_flag,CheckForInexactConversion check_inexact)2530 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2531 Register result,
2532 DoubleRegister double_input,
2533 Register scratch,
2534 DoubleRegister double_scratch,
2535 Register except_flag,
2536 CheckForInexactConversion check_inexact) {
2537 DCHECK(!result.is(scratch));
2538 DCHECK(!double_input.is(double_scratch));
2539 DCHECK(!except_flag.is(scratch));
2540
2541 Label done;
2542
2543 // Clear the except flag (0 = no exception)
2544 mov(except_flag, zero_reg);
2545
2546 // Test for values that can be exactly represented as a signed 32-bit integer.
2547 cvt_w_d(double_scratch, double_input);
2548 mfc1(result, double_scratch);
2549 cvt_d_w(double_scratch, double_scratch);
2550 BranchF(&done, NULL, eq, double_input, double_scratch);
2551
2552 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
2553
2554 if (check_inexact == kDontCheckForInexactConversion) {
2555 // Ignore inexact exceptions.
2556 except_mask &= ~kFCSRInexactFlagMask;
2557 }
2558
2559 // Save FCSR.
2560 cfc1(scratch, FCSR);
2561 // Disable FPU exceptions.
2562 ctc1(zero_reg, FCSR);
2563
2564 // Do operation based on rounding mode.
2565 switch (rounding_mode) {
2566 case kRoundToNearest:
2567 Round_w_d(double_scratch, double_input);
2568 break;
2569 case kRoundToZero:
2570 Trunc_w_d(double_scratch, double_input);
2571 break;
2572 case kRoundToPlusInf:
2573 Ceil_w_d(double_scratch, double_input);
2574 break;
2575 case kRoundToMinusInf:
2576 Floor_w_d(double_scratch, double_input);
2577 break;
2578 } // End of switch-statement.
2579
2580 // Retrieve FCSR.
2581 cfc1(except_flag, FCSR);
2582 // Restore FCSR.
2583 ctc1(scratch, FCSR);
2584 // Move the converted value into the result register.
2585 mfc1(result, double_scratch);
2586
2587 // Check for fpu exceptions.
2588 And(except_flag, except_flag, Operand(except_mask));
2589
2590 bind(&done);
2591 }
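// Illustrative usage sketch (assumed caller code, not from this file): callers
// typically branch on the masked flag register, e.g.
//   EmitFPUTruncate(kRoundToZero, result, double_input, scratch, double_scratch,
//                   except_flag, kDontCheckForInexactConversion);
//   Branch(&not_representable, ne, except_flag, Operand(zero_reg));
// where a zero except_flag means the conversion raised none of the checked
// FCSR exceptions and 'result' holds the rounded value.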
2592
2593
TryInlineTruncateDoubleToI(Register result,DoubleRegister double_input,Label * done)2594 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2595 DoubleRegister double_input,
2596 Label* done) {
2597 DoubleRegister single_scratch = kLithiumScratchDouble.low();
2598 Register scratch = at;
2599 Register scratch2 = t9;
2600
2601 // Clear cumulative exception flags and save the FCSR.
2602 cfc1(scratch2, FCSR);
2603 ctc1(zero_reg, FCSR);
2604 // Try a conversion to a signed integer.
2605 trunc_w_d(single_scratch, double_input);
2606 mfc1(result, single_scratch);
2607 // Retrieve and restore the FCSR.
2608 cfc1(scratch, FCSR);
2609 ctc1(scratch2, FCSR);
2610 // Check for overflow and NaNs.
2611 And(scratch,
2612 scratch,
2613 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2614 // If we had no exceptions we are done.
2615 Branch(done, eq, scratch, Operand(zero_reg));
2616 }
2617
2618
TruncateDoubleToI(Register result,DoubleRegister double_input)2619 void MacroAssembler::TruncateDoubleToI(Register result,
2620 DoubleRegister double_input) {
2621 Label done;
2622
2623 TryInlineTruncateDoubleToI(result, double_input, &done);
2624
2625 // If we fell through then the inline version didn't succeed - call the stub instead.
2626 push(ra);
2627 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2628 sdc1(double_input, MemOperand(sp, 0));
2629
2630 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2631 CallStub(&stub);
2632
2633 Addu(sp, sp, Operand(kDoubleSize));
2634 pop(ra);
2635
2636 bind(&done);
2637 }
2638
2639
TruncateHeapNumberToI(Register result,Register object)2640 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2641 Label done;
2642 DoubleRegister double_scratch = f12;
2643 DCHECK(!result.is(object));
2644
2645 ldc1(double_scratch,
2646 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2647 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2648
2649 // If we fell through then the inline version didn't succeed - call the stub instead.
2650 push(ra);
2651 DoubleToIStub stub(isolate(),
2652 object,
2653 result,
2654 HeapNumber::kValueOffset - kHeapObjectTag,
2655 true,
2656 true);
2657 CallStub(&stub);
2658 pop(ra);
2659
2660 bind(&done);
2661 }
2662
2663
TruncateNumberToI(Register object,Register result,Register heap_number_map,Register scratch,Label * not_number)2664 void MacroAssembler::TruncateNumberToI(Register object,
2665 Register result,
2666 Register heap_number_map,
2667 Register scratch,
2668 Label* not_number) {
2669 Label done;
2670 DCHECK(!result.is(object));
2671
2672 UntagAndJumpIfSmi(result, object, &done);
2673 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2674 TruncateHeapNumberToI(result, object);
2675
2676 bind(&done);
2677 }
2678
2679
GetLeastBitsFromSmi(Register dst,Register src,int num_least_bits)2680 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2681 Register src,
2682 int num_least_bits) {
2683 Ext(dst, src, kSmiTagSize, num_least_bits);
2684 }
2685
2686
GetLeastBitsFromInt32(Register dst,Register src,int num_least_bits)2687 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2688 Register src,
2689 int num_least_bits) {
2690 And(dst, src, Operand((1 << num_least_bits) - 1));
2691 }
2692
2693
2694 // Emulated conditional branches do not emit a nop in the branch delay slot.
2695 //
2696 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2697 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
2698 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
2699 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2700
2701
Branch(int32_t offset,BranchDelaySlot bdslot)2702 void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2703 DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
2704 BranchShort(offset, bdslot);
2705 }
2706
2707
Branch(int32_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)2708 void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2709 const Operand& rt, BranchDelaySlot bdslot) {
2710 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2711 DCHECK(is_near);
2712 USE(is_near);
2713 }
2714
2715
Branch(Label * L,BranchDelaySlot bdslot)2716 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2717 if (L->is_bound()) {
2718 if (is_near_branch(L)) {
2719 BranchShort(L, bdslot);
2720 } else {
2721 BranchLong(L, bdslot);
2722 }
2723 } else {
2724 if (is_trampoline_emitted()) {
2725 BranchLong(L, bdslot);
2726 } else {
2727 BranchShort(L, bdslot);
2728 }
2729 }
2730 }
2731
2732
Branch(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)2733 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2734 const Operand& rt,
2735 BranchDelaySlot bdslot) {
2736 if (L->is_bound()) {
2737 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
2738 if (cond != cc_always) {
2739 Label skip;
2740 Condition neg_cond = NegateCondition(cond);
2741 BranchShort(&skip, neg_cond, rs, rt);
2742 BranchLong(L, bdslot);
2743 bind(&skip);
2744 } else {
2745 BranchLong(L, bdslot);
2746 }
2747 }
2748 } else {
2749 if (is_trampoline_emitted()) {
2750 if (cond != cc_always) {
2751 Label skip;
2752 Condition neg_cond = NegateCondition(cond);
2753 BranchShort(&skip, neg_cond, rs, rt);
2754 BranchLong(L, bdslot);
2755 bind(&skip);
2756 } else {
2757 BranchLong(L, bdslot);
2758 }
2759 } else {
2760 BranchShort(L, cond, rs, rt, bdslot);
2761 }
2762 }
2763 }
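// Illustrative note, not emitted code: short MIPS branches reach only a limited
// (16/21/26-bit) instruction offset, while the long form (BranchLong) is
// unconditional. The pattern above therefore inverts the condition and uses a
// short branch to skip over the long one, which preserves the semantics of
//   "if (cond) goto L;"
// for targets at any distance.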
2764
2765
Branch(Label * L,Condition cond,Register rs,Heap::RootListIndex index,BranchDelaySlot bdslot)2766 void MacroAssembler::Branch(Label* L,
2767 Condition cond,
2768 Register rs,
2769 Heap::RootListIndex index,
2770 BranchDelaySlot bdslot) {
2771 LoadRoot(at, index);
2772 Branch(L, cond, rs, Operand(at), bdslot);
2773 }
2774
2775
BranchShortHelper(int16_t offset,Label * L,BranchDelaySlot bdslot)2776 void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2777 BranchDelaySlot bdslot) {
2778 DCHECK(L == nullptr || offset == 0);
2779 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2780 b(offset);
2781
2782 // Emit a nop in the branch delay slot if required.
2783 if (bdslot == PROTECT)
2784 nop();
2785 }
2786
2787
BranchShortHelperR6(int32_t offset,Label * L)2788 void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2789 DCHECK(L == nullptr || offset == 0);
2790 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2791 bc(offset);
2792 }
2793
2794
BranchShort(int32_t offset,BranchDelaySlot bdslot)2795 void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2796 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2797 DCHECK(is_int26(offset));
2798 BranchShortHelperR6(offset, nullptr);
2799 } else {
2800 DCHECK(is_int16(offset));
2801 BranchShortHelper(offset, nullptr, bdslot);
2802 }
2803 }
2804
2805
BranchShort(Label * L,BranchDelaySlot bdslot)2806 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2807 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2808 BranchShortHelperR6(0, L);
2809 } else {
2810 BranchShortHelper(0, L, bdslot);
2811 }
2812 }
2813
2814
IsZero(const Operand & rt)2815 static inline bool IsZero(const Operand& rt) {
2816 if (rt.is_reg()) {
2817 return rt.rm().is(zero_reg);
2818 } else {
2819 return rt.immediate() == 0;
2820 }
2821 }
2822
2823
GetOffset(int32_t offset,Label * L,OffsetSize bits)2824 int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2825 if (L) {
2826 offset = branch_offset_helper(L, bits) >> 2;
2827 } else {
2828 DCHECK(is_intn(offset, bits));
2829 }
2830 return offset;
2831 }
2832
2833
GetRtAsRegisterHelper(const Operand & rt,Register scratch)2834 Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
2835 Register scratch) {
2836 Register r2 = no_reg;
2837 if (rt.is_reg()) {
2838 r2 = rt.rm_;
2839 } else {
2840 r2 = scratch;
2841 li(r2, rt);
2842 }
2843
2844 return r2;
2845 }
2846
2847
BranchShortHelperR6(int32_t offset,Label * L,Condition cond,Register rs,const Operand & rt)2848 bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2849 Condition cond, Register rs,
2850 const Operand& rt) {
2851 DCHECK(L == nullptr || offset == 0);
2852 Register scratch = rs.is(at) ? t8 : at;
2853 OffsetSize bits = OffsetSize::kOffset16;
2854
2855 // Be careful to always use shifted_branch_offset only just before the
2856 // branch instruction, as the location will be remembered for patching the
2857 // target.
2858 {
2859 BlockTrampolinePoolScope block_trampoline_pool(this);
2860 switch (cond) {
2861 case cc_always:
2862 bits = OffsetSize::kOffset26;
2863 if (!is_near(L, bits)) return false;
2864 offset = GetOffset(offset, L, bits);
2865 bc(offset);
2866 break;
2867 case eq:
2868 if (rs.code() == rt.rm_.reg_code) {
2869 // Pre R6 beq is used here to make the code patchable. Otherwise bc
2870 // should be used, which has no condition field and so is not patchable.
2871 bits = OffsetSize::kOffset16;
2872 if (!is_near(L, bits)) return false;
2873 scratch = GetRtAsRegisterHelper(rt, scratch);
2874 offset = GetOffset(offset, L, bits);
2875 beq(rs, scratch, offset);
2876 nop();
2877 } else if (IsZero(rt)) {
2878 bits = OffsetSize::kOffset21;
2879 if (!is_near(L, bits)) return false;
2880 offset = GetOffset(offset, L, bits);
2881 beqzc(rs, offset);
2882 } else {
2883 // We don't want any other register but scratch clobbered.
2884 bits = OffsetSize::kOffset16;
2885 if (!is_near(L, bits)) return false;
2886 scratch = GetRtAsRegisterHelper(rt, scratch);
2887 offset = GetOffset(offset, L, bits);
2888 beqc(rs, scratch, offset);
2889 }
2890 break;
2891 case ne:
2892 if (rs.code() == rt.rm_.reg_code) {
2893 // Pre R6 bne is used here to make the code patchable. Otherwise we
2894 // should not generate any instruction.
2895 bits = OffsetSize::kOffset16;
2896 if (!is_near(L, bits)) return false;
2897 scratch = GetRtAsRegisterHelper(rt, scratch);
2898 offset = GetOffset(offset, L, bits);
2899 bne(rs, scratch, offset);
2900 nop();
2901 } else if (IsZero(rt)) {
2902 bits = OffsetSize::kOffset21;
2903 if (!is_near(L, bits)) return false;
2904 offset = GetOffset(offset, L, bits);
2905 bnezc(rs, offset);
2906 } else {
2907 // We don't want any other register but scratch clobbered.
2908 bits = OffsetSize::kOffset16;
2909 if (!is_near(L, bits)) return false;
2910 scratch = GetRtAsRegisterHelper(rt, scratch);
2911 offset = GetOffset(offset, L, bits);
2912 bnec(rs, scratch, offset);
2913 }
2914 break;
2915
2916 // Signed comparison.
2917 case greater:
2918 // rs > rt
2919 if (rs.code() == rt.rm_.reg_code) {
2920 break; // No code needs to be emitted.
2921 } else if (rs.is(zero_reg)) {
2922 bits = OffsetSize::kOffset16;
2923 if (!is_near(L, bits)) return false;
2924 scratch = GetRtAsRegisterHelper(rt, scratch);
2925 offset = GetOffset(offset, L, bits);
2926 bltzc(scratch, offset);
2927 } else if (IsZero(rt)) {
2928 bits = OffsetSize::kOffset16;
2929 if (!is_near(L, bits)) return false;
2930 offset = GetOffset(offset, L, bits);
2931 bgtzc(rs, offset);
2932 } else {
2933 bits = OffsetSize::kOffset16;
2934 if (!is_near(L, bits)) return false;
2935 scratch = GetRtAsRegisterHelper(rt, scratch);
2936 DCHECK(!rs.is(scratch));
2937 offset = GetOffset(offset, L, bits);
2938 bltc(scratch, rs, offset);
2939 }
2940 break;
2941 case greater_equal:
2942 // rs >= rt
2943 if (rs.code() == rt.rm_.reg_code) {
2944 bits = OffsetSize::kOffset26;
2945 if (!is_near(L, bits)) return false;
2946 offset = GetOffset(offset, L, bits);
2947 bc(offset);
2948 } else if (rs.is(zero_reg)) {
2949 bits = OffsetSize::kOffset16;
2950 if (!is_near(L, bits)) return false;
2951 scratch = GetRtAsRegisterHelper(rt, scratch);
2952 offset = GetOffset(offset, L, bits);
2953 blezc(scratch, offset);
2954 } else if (IsZero(rt)) {
2955 bits = OffsetSize::kOffset16;
2956 if (!is_near(L, bits)) return false;
2957 offset = GetOffset(offset, L, bits);
2958 bgezc(rs, offset);
2959 } else {
2960 bits = OffsetSize::kOffset16;
2961 if (!is_near(L, bits)) return false;
2962 scratch = GetRtAsRegisterHelper(rt, scratch);
2963 DCHECK(!rs.is(scratch));
2964 offset = GetOffset(offset, L, bits);
2965 bgec(rs, scratch, offset);
2966 }
2967 break;
2968 case less:
2969 // rs < rt
2970 if (rs.code() == rt.rm_.reg_code) {
2971 break; // No code needs to be emitted.
2972 } else if (rs.is(zero_reg)) {
2973 bits = OffsetSize::kOffset16;
2974 if (!is_near(L, bits)) return false;
2975 scratch = GetRtAsRegisterHelper(rt, scratch);
2976 offset = GetOffset(offset, L, bits);
2977 bgtzc(scratch, offset);
2978 } else if (IsZero(rt)) {
2979 bits = OffsetSize::kOffset16;
2980 if (!is_near(L, bits)) return false;
2981 offset = GetOffset(offset, L, bits);
2982 bltzc(rs, offset);
2983 } else {
2984 bits = OffsetSize::kOffset16;
2985 if (!is_near(L, bits)) return false;
2986 scratch = GetRtAsRegisterHelper(rt, scratch);
2987 DCHECK(!rs.is(scratch));
2988 offset = GetOffset(offset, L, bits);
2989 bltc(rs, scratch, offset);
2990 }
2991 break;
2992 case less_equal:
2993 // rs <= rt
2994 if (rs.code() == rt.rm_.reg_code) {
2995 bits = OffsetSize::kOffset26;
2996 if (!is_near(L, bits)) return false;
2997 offset = GetOffset(offset, L, bits);
2998 bc(offset);
2999 } else if (rs.is(zero_reg)) {
3000 bits = OffsetSize::kOffset16;
3001 if (!is_near(L, bits)) return false;
3002 scratch = GetRtAsRegisterHelper(rt, scratch);
3003 offset = GetOffset(offset, L, bits);
3004 bgezc(scratch, offset);
3005 } else if (IsZero(rt)) {
3006 bits = OffsetSize::kOffset16;
3007 if (!is_near(L, bits)) return false;
3008 offset = GetOffset(offset, L, bits);
3009 blezc(rs, offset);
3010 } else {
3011 bits = OffsetSize::kOffset16;
3012 if (!is_near(L, bits)) return false;
3013 scratch = GetRtAsRegisterHelper(rt, scratch);
3014 DCHECK(!rs.is(scratch));
3015 offset = GetOffset(offset, L, bits);
3016 bgec(scratch, rs, offset);
3017 }
3018 break;
3019
3020 // Unsigned comparison.
3021 case Ugreater:
3022 // rs > rt
3023 if (rs.code() == rt.rm_.reg_code) {
3024 break; // No code needs to be emitted.
3025 } else if (rs.is(zero_reg)) {
3026 bits = OffsetSize::kOffset21;
3027 if (!is_near(L, bits)) return false;
3028 scratch = GetRtAsRegisterHelper(rt, scratch);
3029 offset = GetOffset(offset, L, bits);
3030 bnezc(scratch, offset);
3031 } else if (IsZero(rt)) {
3032 bits = OffsetSize::kOffset21;
3033 if (!is_near(L, bits)) return false;
3034 offset = GetOffset(offset, L, bits);
3035 bnezc(rs, offset);
3036 } else {
3037 bits = OffsetSize::kOffset16;
3038 if (!is_near(L, bits)) return false;
3039 scratch = GetRtAsRegisterHelper(rt, scratch);
3040 DCHECK(!rs.is(scratch));
3041 offset = GetOffset(offset, L, bits);
3042 bltuc(scratch, rs, offset);
3043 }
3044 break;
3045 case Ugreater_equal:
3046 // rs >= rt
3047 if (rs.code() == rt.rm_.reg_code) {
3048 bits = OffsetSize::kOffset26;
3049 if (!is_near(L, bits)) return false;
3050 offset = GetOffset(offset, L, bits);
3051 bc(offset);
3052 } else if (rs.is(zero_reg)) {
3053 bits = OffsetSize::kOffset21;
3054 if (!is_near(L, bits)) return false;
3055 scratch = GetRtAsRegisterHelper(rt, scratch);
3056 offset = GetOffset(offset, L, bits);
3057 beqzc(scratch, offset);
3058 } else if (IsZero(rt)) {
3059 bits = OffsetSize::kOffset26;
3060 if (!is_near(L, bits)) return false;
3061 offset = GetOffset(offset, L, bits);
3062 bc(offset);
3063 } else {
3064 bits = OffsetSize::kOffset16;
3065 if (!is_near(L, bits)) return false;
3066 scratch = GetRtAsRegisterHelper(rt, scratch);
3067 DCHECK(!rs.is(scratch));
3068 offset = GetOffset(offset, L, bits);
3069 bgeuc(rs, scratch, offset);
3070 }
3071 break;
3072 case Uless:
3073 // rs < rt
3074 if (rs.code() == rt.rm_.reg_code) {
3075 break; // No code needs to be emitted.
3076 } else if (rs.is(zero_reg)) {
3077 bits = OffsetSize::kOffset21;
3078 if (!is_near(L, bits)) return false;
3079 scratch = GetRtAsRegisterHelper(rt, scratch);
3080 offset = GetOffset(offset, L, bits);
3081 bnezc(scratch, offset);
3082 } else if (IsZero(rt)) {
3083 break; // No code needs to be emitted.
3084 } else {
3085 bits = OffsetSize::kOffset16;
3086 if (!is_near(L, bits)) return false;
3087 scratch = GetRtAsRegisterHelper(rt, scratch);
3088 DCHECK(!rs.is(scratch));
3089 offset = GetOffset(offset, L, bits);
3090 bltuc(rs, scratch, offset);
3091 }
3092 break;
3093 case Uless_equal:
3094 // rs <= rt
3095 if (rs.code() == rt.rm_.reg_code) {
3096 bits = OffsetSize::kOffset26;
3097 if (!is_near(L, bits)) return false;
3098 offset = GetOffset(offset, L, bits);
3099 bc(offset);
3100 } else if (rs.is(zero_reg)) {
3101 bits = OffsetSize::kOffset26;
3102 if (!is_near(L, bits)) return false;
3103 scratch = GetRtAsRegisterHelper(rt, scratch);
3104 offset = GetOffset(offset, L, bits);
3105 bc(offset);
3106 } else if (IsZero(rt)) {
3107 bits = OffsetSize::kOffset21;
3108 if (!is_near(L, bits)) return false;
3109 offset = GetOffset(offset, L, bits);
3110 beqzc(rs, offset);
3111 } else {
3112 bits = OffsetSize::kOffset16;
3113 if (!is_near(L, bits)) return false;
3114 scratch = GetRtAsRegisterHelper(rt, scratch);
3115 DCHECK(!rs.is(scratch));
3116 offset = GetOffset(offset, L, bits);
3117 bgeuc(scratch, rs, offset);
3118 }
3119 break;
3120 default:
3121 UNREACHABLE();
3122 }
3123 }
3124 CheckTrampolinePoolQuick(1);
3125 return true;
3126 }
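// Illustrative note, not emitted code: the r6 helper above always tries the
// widest encoding available for the chosen form: bc/balc take 26-bit offsets,
// beqzc/bnezc take 21-bit offsets, and the two-register compact branches take
// 16-bit offsets. The >> 2 in GetOffset converts a bound label's byte distance
// into the instruction-count offset that these encodings expect.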
3127
3128
BranchShortHelper(int16_t offset,Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3129 bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3130 Register rs, const Operand& rt,
3131 BranchDelaySlot bdslot) {
3132 DCHECK(L == nullptr || offset == 0);
3133 if (!is_near(L, OffsetSize::kOffset16)) return false;
3134
3135 Register scratch = at;
3136 int32_t offset32;
3137
3138 // Be careful to always use shifted_branch_offset only just before the
3139 // branch instruction, as the location will be remembered for patching the
3140 // target.
3141 {
3142 BlockTrampolinePoolScope block_trampoline_pool(this);
3143 switch (cond) {
3144 case cc_always:
3145 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3146 b(offset32);
3147 break;
3148 case eq:
3149 if (IsZero(rt)) {
3150 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3151 beq(rs, zero_reg, offset32);
3152 } else {
3153 // We don't want any other register but scratch clobbered.
3154 scratch = GetRtAsRegisterHelper(rt, scratch);
3155 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3156 beq(rs, scratch, offset32);
3157 }
3158 break;
3159 case ne:
3160 if (IsZero(rt)) {
3161 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3162 bne(rs, zero_reg, offset32);
3163 } else {
3164 // We don't want any other register but scratch clobbered.
3165 scratch = GetRtAsRegisterHelper(rt, scratch);
3166 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3167 bne(rs, scratch, offset32);
3168 }
3169 break;
3170
3171 // Signed comparison.
3172 case greater:
3173 if (IsZero(rt)) {
3174 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3175 bgtz(rs, offset32);
3176 } else {
3177 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3178 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3179 bne(scratch, zero_reg, offset32);
3180 }
3181 break;
3182 case greater_equal:
3183 if (IsZero(rt)) {
3184 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3185 bgez(rs, offset32);
3186 } else {
3187 Slt(scratch, rs, rt);
3188 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3189 beq(scratch, zero_reg, offset32);
3190 }
3191 break;
3192 case less:
3193 if (IsZero(rt)) {
3194 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3195 bltz(rs, offset32);
3196 } else {
3197 Slt(scratch, rs, rt);
3198 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3199 bne(scratch, zero_reg, offset32);
3200 }
3201 break;
3202 case less_equal:
3203 if (IsZero(rt)) {
3204 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3205 blez(rs, offset32);
3206 } else {
3207 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3208 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3209 beq(scratch, zero_reg, offset32);
3210 }
3211 break;
3212
3213 // Unsigned comparison.
3214 case Ugreater:
3215 if (IsZero(rt)) {
3216 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3217 bne(rs, zero_reg, offset32);
3218 } else {
3219 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3220 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3221 bne(scratch, zero_reg, offset32);
3222 }
3223 break;
3224 case Ugreater_equal:
3225 if (IsZero(rt)) {
3226 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3227 b(offset32);
3228 } else {
3229 Sltu(scratch, rs, rt);
3230 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3231 beq(scratch, zero_reg, offset32);
3232 }
3233 break;
3234 case Uless:
3235 if (IsZero(rt)) {
3236 return true; // No code needs to be emitted.
3237 } else {
3238 Sltu(scratch, rs, rt);
3239 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3240 bne(scratch, zero_reg, offset32);
3241 }
3242 break;
3243 case Uless_equal:
3244 if (IsZero(rt)) {
3245 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3246 beq(rs, zero_reg, offset32);
3247 } else {
3248 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3249 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3250 beq(scratch, zero_reg, offset32);
3251 }
3252 break;
3253 default:
3254 UNREACHABLE();
3255 }
3256 }
3257 // Emit a nop in the branch delay slot if required.
3258 if (bdslot == PROTECT)
3259 nop();
3260
3261 return true;
3262 }
3263
3264
BranchShortCheck(int32_t offset,Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3265 bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3266 Register rs, const Operand& rt,
3267 BranchDelaySlot bdslot) {
3268 BRANCH_ARGS_CHECK(cond, rs, rt);
3269 if (!L) {
3270 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3271 DCHECK(is_int26(offset));
3272 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3273 } else {
3274 DCHECK(is_int16(offset));
3275 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3276 }
3277 } else {
3278 DCHECK(offset == 0);
3279 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3280 return BranchShortHelperR6(0, L, cond, rs, rt);
3281 } else {
3282 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3283 }
3284 }
3285 return false;
3286 }
3287
3288
BranchShort(int32_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3289 void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3290 const Operand& rt, BranchDelaySlot bdslot) {
3291 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3292 }
3293
3294
BranchShort(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3295 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3296 const Operand& rt, BranchDelaySlot bdslot) {
3297 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3298 }
3299
3300
BranchAndLink(int32_t offset,BranchDelaySlot bdslot)3301 void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3302 BranchAndLinkShort(offset, bdslot);
3303 }
3304
3305
BranchAndLink(int32_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3306 void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3307 const Operand& rt, BranchDelaySlot bdslot) {
3308 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3309 DCHECK(is_near);
3310 USE(is_near);
3311 }
3312
3313
BranchAndLink(Label * L,BranchDelaySlot bdslot)3314 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3315 if (L->is_bound()) {
3316 if (is_near_branch(L)) {
3317 BranchAndLinkShort(L, bdslot);
3318 } else {
3319 BranchAndLinkLong(L, bdslot);
3320 }
3321 } else {
3322 if (is_trampoline_emitted()) {
3323 BranchAndLinkLong(L, bdslot);
3324 } else {
3325 BranchAndLinkShort(L, bdslot);
3326 }
3327 }
3328 }
3329
3330
BranchAndLink(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3331 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3332 const Operand& rt,
3333 BranchDelaySlot bdslot) {
3334 if (L->is_bound()) {
3335 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3336 Label skip;
3337 Condition neg_cond = NegateCondition(cond);
3338 BranchShort(&skip, neg_cond, rs, rt);
3339 BranchAndLinkLong(L, bdslot);
3340 bind(&skip);
3341 }
3342 } else {
3343 if (is_trampoline_emitted()) {
3344 Label skip;
3345 Condition neg_cond = NegateCondition(cond);
3346 BranchShort(&skip, neg_cond, rs, rt);
3347 BranchAndLinkLong(L, bdslot);
3348 bind(&skip);
3349 } else {
3350 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3351 }
3352 }
3353 }
3354
3355
BranchAndLinkShortHelper(int16_t offset,Label * L,BranchDelaySlot bdslot)3356 void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3357 BranchDelaySlot bdslot) {
3358 DCHECK(L == nullptr || offset == 0);
3359 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3360 bal(offset);
3361
3362 // Emit a nop in the branch delay slot if required.
3363 if (bdslot == PROTECT)
3364 nop();
3365 }
3366
3367
BranchAndLinkShortHelperR6(int32_t offset,Label * L)3368 void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3369 DCHECK(L == nullptr || offset == 0);
3370 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3371 balc(offset);
3372 }
3373
3374
BranchAndLinkShort(int32_t offset,BranchDelaySlot bdslot)3375 void MacroAssembler::BranchAndLinkShort(int32_t offset,
3376 BranchDelaySlot bdslot) {
3377 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3378 DCHECK(is_int26(offset));
3379 BranchAndLinkShortHelperR6(offset, nullptr);
3380 } else {
3381 DCHECK(is_int16(offset));
3382 BranchAndLinkShortHelper(offset, nullptr, bdslot);
3383 }
3384 }
3385
3386
BranchAndLinkShort(Label * L,BranchDelaySlot bdslot)3387 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3388 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3389 BranchAndLinkShortHelperR6(0, L);
3390 } else {
3391 BranchAndLinkShortHelper(0, L, bdslot);
3392 }
3393 }
3394
3395
BranchAndLinkShortHelperR6(int32_t offset,Label * L,Condition cond,Register rs,const Operand & rt)3396 bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3397 Condition cond, Register rs,
3398 const Operand& rt) {
3399 DCHECK(L == nullptr || offset == 0);
3400 Register scratch = rs.is(at) ? t8 : at;
3401 OffsetSize bits = OffsetSize::kOffset16;
3402
3403 BlockTrampolinePoolScope block_trampoline_pool(this);
3404 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3405 switch (cond) {
3406 case cc_always:
3407 bits = OffsetSize::kOffset26;
3408 if (!is_near(L, bits)) return false;
3409 offset = GetOffset(offset, L, bits);
3410 balc(offset);
3411 break;
3412 case eq:
3413 if (!is_near(L, bits)) return false;
3414 Subu(scratch, rs, rt);
3415 offset = GetOffset(offset, L, bits);
3416 beqzalc(scratch, offset);
3417 break;
3418 case ne:
3419 if (!is_near(L, bits)) return false;
3420 Subu(scratch, rs, rt);
3421 offset = GetOffset(offset, L, bits);
3422 bnezalc(scratch, offset);
3423 break;
3424
3425 // Signed comparison.
3426 case greater:
3427 // rs > rt
3428 if (rs.code() == rt.rm_.reg_code) {
3429 break; // No code needs to be emitted.
3430 } else if (rs.is(zero_reg)) {
3431 if (!is_near(L, bits)) return false;
3432 scratch = GetRtAsRegisterHelper(rt, scratch);
3433 offset = GetOffset(offset, L, bits);
3434 bltzalc(scratch, offset);
3435 } else if (IsZero(rt)) {
3436 if (!is_near(L, bits)) return false;
3437 offset = GetOffset(offset, L, bits);
3438 bgtzalc(rs, offset);
3439 } else {
3440 if (!is_near(L, bits)) return false;
3441 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3442 offset = GetOffset(offset, L, bits);
3443 bnezalc(scratch, offset);
3444 }
3445 break;
3446 case greater_equal:
3447 // rs >= rt
3448 if (rs.code() == rt.rm_.reg_code) {
3449 bits = OffsetSize::kOffset26;
3450 if (!is_near(L, bits)) return false;
3451 offset = GetOffset(offset, L, bits);
3452 balc(offset);
3453 } else if (rs.is(zero_reg)) {
3454 if (!is_near(L, bits)) return false;
3455 scratch = GetRtAsRegisterHelper(rt, scratch);
3456 offset = GetOffset(offset, L, bits);
3457 blezalc(scratch, offset);
3458 } else if (IsZero(rt)) {
3459 if (!is_near(L, bits)) return false;
3460 offset = GetOffset(offset, L, bits);
3461 bgezalc(rs, offset);
3462 } else {
3463 if (!is_near(L, bits)) return false;
3464 Slt(scratch, rs, rt);
3465 offset = GetOffset(offset, L, bits);
3466 beqzalc(scratch, offset);
3467 }
3468 break;
3469 case less:
3470 // rs < rt
3471 if (rs.code() == rt.rm_.reg_code) {
3472 break; // No code needs to be emitted.
3473 } else if (rs.is(zero_reg)) {
3474 if (!is_near(L, bits)) return false;
3475 scratch = GetRtAsRegisterHelper(rt, scratch);
3476 offset = GetOffset(offset, L, bits);
3477 bgtzalc(scratch, offset);
3478 } else if (IsZero(rt)) {
3479 if (!is_near(L, bits)) return false;
3480 offset = GetOffset(offset, L, bits);
3481 bltzalc(rs, offset);
3482 } else {
3483 if (!is_near(L, bits)) return false;
3484 Slt(scratch, rs, rt);
3485 offset = GetOffset(offset, L, bits);
3486 bnezalc(scratch, offset);
3487 }
3488 break;
3489 case less_equal:
3490 // rs <= rt
3491 if (rs.code() == rt.rm_.reg_code) {
3492 bits = OffsetSize::kOffset26;
3493 if (!is_near(L, bits)) return false;
3494 offset = GetOffset(offset, L, bits);
3495 balc(offset);
3496 } else if (rs.is(zero_reg)) {
3497 if (!is_near(L, bits)) return false;
3498 scratch = GetRtAsRegisterHelper(rt, scratch);
3499 offset = GetOffset(offset, L, bits);
3500 bgezalc(scratch, offset);
3501 } else if (IsZero(rt)) {
3502 if (!is_near(L, bits)) return false;
3503 offset = GetOffset(offset, L, bits);
3504 blezalc(rs, offset);
3505 } else {
3506 if (!is_near(L, bits)) return false;
3507 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3508 offset = GetOffset(offset, L, bits);
3509 beqzalc(scratch, offset);
3510 }
3511 break;
3512
3513
3514 // Unsigned comparison.
3515 case Ugreater:
3516 // rs > rt
3517 if (!is_near(L, bits)) return false;
3518 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3519 offset = GetOffset(offset, L, bits);
3520 bnezalc(scratch, offset);
3521 break;
3522 case Ugreater_equal:
3523 // rs >= rt
3524 if (!is_near(L, bits)) return false;
3525 Sltu(scratch, rs, rt);
3526 offset = GetOffset(offset, L, bits);
3527 beqzalc(scratch, offset);
3528 break;
3529 case Uless:
3530 // rs < rt
3531 if (!is_near(L, bits)) return false;
3532 Sltu(scratch, rs, rt);
3533 offset = GetOffset(offset, L, bits);
3534 bnezalc(scratch, offset);
3535 break;
3536 case Uless_equal:
3537 // rs <= rt
3538 if (!is_near(L, bits)) return false;
3539 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3540 offset = GetOffset(offset, L, bits);
3541 beqzalc(scratch, offset);
3542 break;
3543 default:
3544 UNREACHABLE();
3545 }
3546 return true;
3547 }
3548
3549
3550 // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3551 // with the slt instructions. We could use sub or add instead but we would miss
3552 // overflow cases, so we keep slt and add an intermediate third instruction.
BranchAndLinkShortHelper(int16_t offset,Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3553 bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3554 Condition cond, Register rs,
3555 const Operand& rt,
3556 BranchDelaySlot bdslot) {
3557 DCHECK(L == nullptr || offset == 0);
3558 if (!is_near(L, OffsetSize::kOffset16)) return false;
3559
3560 Register scratch = t8;
3561 BlockTrampolinePoolScope block_trampoline_pool(this);
3562
3563 switch (cond) {
3564 case cc_always:
3565 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3566 bal(offset);
3567 break;
3568 case eq:
3569 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3570 nop();
3571 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3572 bal(offset);
3573 break;
3574 case ne:
3575 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3576 nop();
3577 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3578 bal(offset);
3579 break;
3580
3581 // Signed comparison.
3582 case greater:
3583 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3584 addiu(scratch, scratch, -1);
3585 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3586 bgezal(scratch, offset);
3587 break;
3588 case greater_equal:
3589 Slt(scratch, rs, rt);
3590 addiu(scratch, scratch, -1);
3591 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3592 bltzal(scratch, offset);
3593 break;
3594 case less:
3595 Slt(scratch, rs, rt);
3596 addiu(scratch, scratch, -1);
3597 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3598 bgezal(scratch, offset);
3599 break;
3600 case less_equal:
3601 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3602 addiu(scratch, scratch, -1);
3603 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3604 bltzal(scratch, offset);
3605 break;
3606
3607 // Unsigned comparison.
3608 case Ugreater:
3609 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3610 addiu(scratch, scratch, -1);
3611 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3612 bgezal(scratch, offset);
3613 break;
3614 case Ugreater_equal:
3615 Sltu(scratch, rs, rt);
3616 addiu(scratch, scratch, -1);
3617 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3618 bltzal(scratch, offset);
3619 break;
3620 case Uless:
3621 Sltu(scratch, rs, rt);
3622 addiu(scratch, scratch, -1);
3623 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3624 bgezal(scratch, offset);
3625 break;
3626 case Uless_equal:
3627 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3628 addiu(scratch, scratch, -1);
3629 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3630 bltzal(scratch, offset);
3631 break;
3632
3633 default:
3634 UNREACHABLE();
3635 }
3636
3637 // Emit a nop in the branch delay slot if required.
3638 if (bdslot == PROTECT)
3639 nop();
3640
3641 return true;
3642 }
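// Illustrative note, not emitted code: bgezal/bltzal only test the sign of a
// register, so the 0/1 result of slt is first mapped onto a sign. For the
// 'greater' case above this works out as:
//   slt    scratch, rt, rs       // scratch = (rs > rt) ? 1 : 0
//   addiu  scratch, scratch, -1  // scratch = (rs > rt) ? 0 : -1
//   bgezal scratch, offset       // taken exactly when rs > rt
// Subtracting rs and rt directly could overflow, which is why slt is kept and
// the extra addiu is paid instead.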
3643
3644
BranchAndLinkShortCheck(int32_t offset,Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3645 bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3646 Condition cond, Register rs,
3647 const Operand& rt,
3648 BranchDelaySlot bdslot) {
3649 BRANCH_ARGS_CHECK(cond, rs, rt);
3650
3651 if (!L) {
3652 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3653 DCHECK(is_int26(offset));
3654 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3655 } else {
3656 DCHECK(is_int16(offset));
3657 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3658 }
3659 } else {
3660 DCHECK(offset == 0);
3661 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3662 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3663 } else {
3664 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3665 }
3666 }
3667 return false;
3668 }
3669
3670
3671 void MacroAssembler::Jump(Register target,
3672 Condition cond,
3673 Register rs,
3674 const Operand& rt,
3675 BranchDelaySlot bd) {
3676 BlockTrampolinePoolScope block_trampoline_pool(this);
3677 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3678 if (cond == cc_always) {
3679 jic(target, 0);
3680 } else {
3681 BRANCH_ARGS_CHECK(cond, rs, rt);
3682 Branch(2, NegateCondition(cond), rs, rt);
3683 jic(target, 0);
3684 }
3685 } else {
3686 if (cond == cc_always) {
3687 jr(target);
3688 } else {
3689 BRANCH_ARGS_CHECK(cond, rs, rt);
3690 Branch(2, NegateCondition(cond), rs, rt);
3691 jr(target);
3692 }
3693 // Emit a nop in the branch delay slot if required.
3694 if (bd == PROTECT) nop();
3695 }
3696 }
3697
3698
3699 void MacroAssembler::Jump(intptr_t target,
3700 RelocInfo::Mode rmode,
3701 Condition cond,
3702 Register rs,
3703 const Operand& rt,
3704 BranchDelaySlot bd) {
3705 Label skip;
3706 if (cond != cc_always) {
3707 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3708 }
3709 // The first instruction of 'li' may be placed in the delay slot.
3710 // This is not an issue; t9 is expected to be clobbered anyway.
3711 li(t9, Operand(target, rmode));
3712 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3713 bind(&skip);
3714 }
3715
3716
3717 void MacroAssembler::Jump(Address target,
3718 RelocInfo::Mode rmode,
3719 Condition cond,
3720 Register rs,
3721 const Operand& rt,
3722 BranchDelaySlot bd) {
3723 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3724 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3725 }
3726
3727
3728 void MacroAssembler::Jump(Handle<Code> code,
3729 RelocInfo::Mode rmode,
3730 Condition cond,
3731 Register rs,
3732 const Operand& rt,
3733 BranchDelaySlot bd) {
3734 DCHECK(RelocInfo::IsCodeTarget(rmode));
3735 AllowDeferredHandleDereference embedding_raw_address;
3736 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3737 }
3738
3739
3740 int MacroAssembler::CallSize(Register target,
3741 Condition cond,
3742 Register rs,
3743 const Operand& rt,
3744 BranchDelaySlot bd) {
3745 int size = 0;
3746
3747 if (cond == cc_always) {
3748 size += 1;
3749 } else {
3750 size += 3;
3751 }
3752
3753 if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
3754
3755 return size * kInstrSize;
3756 }
3757
3758
3759 // Note: To call gcc-compiled C code on MIPS, you must call through t9.
3760 void MacroAssembler::Call(Register target,
3761 Condition cond,
3762 Register rs,
3763 const Operand& rt,
3764 BranchDelaySlot bd) {
3765 #ifdef DEBUG
3766 int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3767 #endif
3768
3769 BlockTrampolinePoolScope block_trampoline_pool(this);
3770 Label start;
3771 bind(&start);
3772 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3773 if (cond == cc_always) {
3774 jialc(target, 0);
3775 } else {
3776 BRANCH_ARGS_CHECK(cond, rs, rt);
3777 Branch(2, NegateCondition(cond), rs, rt);
3778 jialc(target, 0);
3779 }
3780 } else {
3781 if (cond == cc_always) {
3782 jalr(target);
3783 } else {
3784 BRANCH_ARGS_CHECK(cond, rs, rt);
3785 Branch(2, NegateCondition(cond), rs, rt);
3786 jalr(target);
3787 }
3788 // Emit a nop in the branch delay slot if required.
3789 if (bd == PROTECT) nop();
3790 }
3791
3792 #ifdef DEBUG
3793 CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3794 SizeOfCodeGeneratedSince(&start));
3795 #endif
3796 }
3797
3798
3799 int MacroAssembler::CallSize(Address target,
3800 RelocInfo::Mode rmode,
3801 Condition cond,
3802 Register rs,
3803 const Operand& rt,
3804 BranchDelaySlot bd) {
3805 int size = CallSize(t9, cond, rs, rt, bd);
3806 return size + 2 * kInstrSize;
3807 }
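// For illustration: with CONSTANT_SIZE the li of the target address always
// expands to a fixed two-instruction pair, roughly
//   lui t9, %hi(target)
//   ori t9, t9, %lo(target)
// which is why the size computed above is CallSize(t9, ...) plus 2 * kInstrSize.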
3808
3809
3810 void MacroAssembler::Call(Address target,
3811 RelocInfo::Mode rmode,
3812 Condition cond,
3813 Register rs,
3814 const Operand& rt,
3815 BranchDelaySlot bd) {
3816 BlockTrampolinePoolScope block_trampoline_pool(this);
3817 Label start;
3818 bind(&start);
3819 int32_t target_int = reinterpret_cast<int32_t>(target);
3820 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3821 Call(t9, cond, rs, rt, bd);
3822 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3823 SizeOfCodeGeneratedSince(&start));
3824 }
3825
3826
3827 int MacroAssembler::CallSize(Handle<Code> code,
3828 RelocInfo::Mode rmode,
3829 TypeFeedbackId ast_id,
3830 Condition cond,
3831 Register rs,
3832 const Operand& rt,
3833 BranchDelaySlot bd) {
3834 AllowDeferredHandleDereference using_raw_address;
3835 return CallSize(reinterpret_cast<Address>(code.location()),
3836 rmode, cond, rs, rt, bd);
3837 }
3838
3839
3840 void MacroAssembler::Call(Handle<Code> code,
3841 RelocInfo::Mode rmode,
3842 TypeFeedbackId ast_id,
3843 Condition cond,
3844 Register rs,
3845 const Operand& rt,
3846 BranchDelaySlot bd) {
3847 BlockTrampolinePoolScope block_trampoline_pool(this);
3848 Label start;
3849 bind(&start);
3850 DCHECK(RelocInfo::IsCodeTarget(rmode));
3851 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3852 SetRecordedAstId(ast_id);
3853 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3854 }
3855 AllowDeferredHandleDereference embedding_raw_address;
3856 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3857 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3858 SizeOfCodeGeneratedSince(&start));
3859 }
3860
3861
3862 void MacroAssembler::Ret(Condition cond,
3863 Register rs,
3864 const Operand& rt,
3865 BranchDelaySlot bd) {
3866 Jump(ra, cond, rs, rt, bd);
3867 }
3868
3869
3870 void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3871 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
3872 (!L->is_bound() || is_near_r6(L))) {
3873 BranchShortHelperR6(0, L);
3874 } else {
3875 BlockTrampolinePoolScope block_trampoline_pool(this);
3876 uint32_t imm32;
3877 imm32 = jump_address(L);
3878 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3879 uint32_t lui_offset, jic_offset;
3880 UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
3881 {
3882 BlockGrowBufferScope block_buf_growth(this);
3883 // Buffer growth (and relocation) must be blocked for internal
3884 // references until associated instructions are emitted and
3885 // available to be patched.
3886 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3887 lui(at, lui_offset);
3888 jic(at, jic_offset);
3889 }
3890 CheckBuffer();
3891 } else {
3892 {
3893 BlockGrowBufferScope block_buf_growth(this);
3894 // Buffer growth (and relocation) must be blocked for internal
3895 // references until associated instructions are emitted and
3896 // available to be patched.
3898 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3899 lui(at, (imm32 & kHiMask) >> kLuiShift);
3900 ori(at, at, (imm32 & kImm16Mask));
3901 }
3902 CheckBuffer();
3903 jr(at);
3904 // Emit a nop in the branch delay slot if required.
3905 if (bdslot == PROTECT) nop();
3906 }
3907 }
3908 }
3909
3910
3911 void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
3912 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
3913 (!L->is_bound() || is_near_r6(L))) {
3914 BranchAndLinkShortHelperR6(0, L);
3915 } else {
3916 BlockTrampolinePoolScope block_trampoline_pool(this);
3917 uint32_t imm32;
3918 imm32 = jump_address(L);
3919 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3920 uint32_t lui_offset, jic_offset;
3921 UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
3922 {
3923 BlockGrowBufferScope block_buf_growth(this);
3924 // Buffer growth (and relocation) must be blocked for internal
3925 // references until associated instructions are emitted and
3926 // available to be patched.
3927 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3928 lui(at, lui_offset);
3929 jialc(at, jic_offset);
3930 }
3931 CheckBuffer();
3932 } else {
3933 {
3934 BlockGrowBufferScope block_buf_growth(this);
3935 // Buffer growth (and relocation) must be blocked for internal
3936 // references until associated instructions are emitted and
3937 // available to be patched.
3939 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3940 lui(at, (imm32 & kHiMask) >> kLuiShift);
3941 ori(at, at, (imm32 & kImm16Mask));
3942 }
3943 CheckBuffer();
3944 jalr(at);
3945 // Emit a nop in the branch delay slot if required.
3946 if (bdslot == PROTECT) nop();
3947 }
3948 }
3949 }
3950
3951
3952 void MacroAssembler::DropAndRet(int drop) {
3953 DCHECK(is_int16(drop * kPointerSize));
3954 Ret(USE_DELAY_SLOT);
3955 addiu(sp, sp, drop * kPointerSize);
3956 }
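// Illustrative expansion (one possible encoding): because Ret is issued with
// USE_DELAY_SLOT, the stack adjustment lands in the delay slot of the return,
//   jr    ra
//   addiu sp, sp, drop * kPointerSize   // executed in the delay slot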
3957
3958 void MacroAssembler::DropAndRet(int drop,
3959 Condition cond,
3960 Register r1,
3961 const Operand& r2) {
3962 // Both Drop and Ret need to be conditional.
3963 Label skip;
3964 if (cond != cc_always) {
3965 Branch(&skip, NegateCondition(cond), r1, r2);
3966 }
3967
3968 Drop(drop);
3969 Ret();
3970
3971 if (cond != cc_always) {
3972 bind(&skip);
3973 }
3974 }
3975
3976
3977 void MacroAssembler::Drop(int count,
3978 Condition cond,
3979 Register reg,
3980 const Operand& op) {
3981 if (count <= 0) {
3982 return;
3983 }
3984
3985 Label skip;
3986
3987 if (cond != al) {
3988 Branch(&skip, NegateCondition(cond), reg, op);
3989 }
3990
3991 Addu(sp, sp, Operand(count * kPointerSize));
3992
3993 if (cond != al) {
3994 bind(&skip);
3995 }
3996 }
3997
3998
3999
4000 void MacroAssembler::Swap(Register reg1,
4001 Register reg2,
4002 Register scratch) {
4003 if (scratch.is(no_reg)) {
4004 Xor(reg1, reg1, Operand(reg2));
4005 Xor(reg2, reg2, Operand(reg1));
4006 Xor(reg1, reg1, Operand(reg2));
4007 } else {
4008 mov(scratch, reg1);
4009 mov(reg1, reg2);
4010 mov(reg2, scratch);
4011 }
4012 }
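// The scratch-free path relies on the XOR swap identity; for example with
// reg1 = 5 and reg2 = 9:
//   reg1 ^= reg2;  // reg1 == 12
//   reg2 ^= reg1;  // reg2 == 5
//   reg1 ^= reg2;  // reg1 == 9
// Note that this only swaps correctly when reg1 and reg2 are distinct
// registers; if they alias, the first xor zeroes the value.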
4013
4014
4015 void MacroAssembler::Call(Label* target) {
4016 BranchAndLink(target);
4017 }
4018
4019
4020 void MacroAssembler::Push(Handle<Object> handle) {
4021 li(at, Operand(handle));
4022 push(at);
4023 }
4024
4025
4026 void MacroAssembler::DebugBreak() {
4027 PrepareCEntryArgs(0);
4028 PrepareCEntryFunction(
4029 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
4030 CEntryStub ces(isolate(), 1);
4031 DCHECK(AllowThisStubCall(&ces));
4032 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
4033 }
4034
4035
4036 // ---------------------------------------------------------------------------
4037 // Exception handling.
4038
4039 void MacroAssembler::PushStackHandler() {
4040 // Adjust this code if the handler layout asserted below changes.
4041 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
4042 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
4043
4044 // Link the current handler as the next handler.
4045 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4046 lw(t1, MemOperand(t2));
4047 push(t1);
4048
4049 // Set this new handler as the current one.
4050 sw(sp, MemOperand(t2));
4051 }
4052
4053
4054 void MacroAssembler::PopStackHandler() {
4055 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4056 pop(a1);
4057 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
4058 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4059 sw(a1, MemOperand(at));
4060 }
4061
4062
4063 void MacroAssembler::Allocate(int object_size,
4064 Register result,
4065 Register scratch1,
4066 Register scratch2,
4067 Label* gc_required,
4068 AllocationFlags flags) {
4069 DCHECK(object_size <= kMaxRegularHeapObjectSize);
4070 DCHECK((flags & ALLOCATION_FOLDED) == 0);
4071 if (!FLAG_inline_new) {
4072 if (emit_debug_code()) {
4073 // Trash the registers to simulate an allocation failure.
4074 li(result, 0x7091);
4075 li(scratch1, 0x7191);
4076 li(scratch2, 0x7291);
4077 }
4078 jmp(gc_required);
4079 return;
4080 }
4081
4082 DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
4083
4084 // Make object size into bytes.
4085 if ((flags & SIZE_IN_WORDS) != 0) {
4086 object_size *= kPointerSize;
4087 }
4088 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
4089
4090 // Check relative positions of allocation top and limit addresses.
4091 // ARM adds additional checks to make sure the ldm instruction can be
4092 // used. On MIPS we don't have ldm so we don't need additional checks either.
4093 ExternalReference allocation_top =
4094 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4095 ExternalReference allocation_limit =
4096 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4097
4098 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4099 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4100 DCHECK((limit - top) == kPointerSize);
4101
4102 // Set up allocation top address and allocation limit registers.
4103 Register top_address = scratch1;
4104 // This code stores a temporary value in t9.
4105 Register alloc_limit = t9;
4106 Register result_end = scratch2;
4107 li(top_address, Operand(allocation_top));
4108
4109 if ((flags & RESULT_CONTAINS_TOP) == 0) {
4110 // Load allocation top into result and allocation limit into alloc_limit.
4111 lw(result, MemOperand(top_address));
4112 lw(alloc_limit, MemOperand(top_address, kPointerSize));
4113 } else {
4114 if (emit_debug_code()) {
4115 // Assert that result actually contains top on entry.
4116 lw(alloc_limit, MemOperand(top_address));
4117 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4118 }
4119 // Load allocation limit. Result already contains allocation top.
4120 lw(alloc_limit, MemOperand(top_address, limit - top));
4121 }
4122
4123 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4124 // Align the next allocation. Storing the filler map without checking top is
4125 // safe in new-space because the limit of the heap is aligned there.
4126 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4127 And(result_end, result, Operand(kDoubleAlignmentMask));
4128 Label aligned;
4129 Branch(&aligned, eq, result_end, Operand(zero_reg));
4130 if ((flags & PRETENURE) != 0) {
4131 Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
4132 }
4133 li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4134 sw(result_end, MemOperand(result));
4135 Addu(result, result, Operand(kDoubleSize / 2));
4136 bind(&aligned);
4137 }
4138
4139 // Calculate new top and bail out if new space is exhausted. Use result
4140 // to calculate the new top.
4141 Addu(result_end, result, Operand(object_size));
4142 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4143
4144 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4145 // The top pointer is not updated for allocation folding dominators.
4146 sw(result_end, MemOperand(top_address));
4147 }
4148
4149 // Tag object.
4150 Addu(result, result, Operand(kHeapObjectTag));
4151 }
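// Minimal usage sketch (hypothetical caller, for illustration only):
//   Label gc_required;
//   masm->Allocate(HeapNumber::kSize, v0, t0, t1, &gc_required,
//                  NO_ALLOCATION_FLAGS);
//   // ... initialize the new object through v0, which holds the tagged
//   // pointer ...
//   masm->bind(&gc_required);  // fall back to a runtime allocation here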
4152
4153
4154 void MacroAssembler::Allocate(Register object_size, Register result,
4155 Register result_end, Register scratch,
4156 Label* gc_required, AllocationFlags flags) {
4157 DCHECK((flags & ALLOCATION_FOLDED) == 0);
4158 if (!FLAG_inline_new) {
4159 if (emit_debug_code()) {
4160 // Trash the registers to simulate an allocation failure.
4161 li(result, 0x7091);
4162 li(scratch, 0x7191);
4163 li(result_end, 0x7291);
4164 }
4165 jmp(gc_required);
4166 return;
4167 }
4168
4169 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
4170 // is not specified. Other registers must not overlap.
4171 DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4172 DCHECK(!AreAliased(result_end, result, scratch, t9, at));
4173 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
4174
4175 // Check relative positions of allocation top and limit addresses.
4176 // ARM adds additional checks to make sure the ldm instruction can be
4177 // used. On MIPS we don't have ldm so we don't need additional checks either.
4178 ExternalReference allocation_top =
4179 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4180 ExternalReference allocation_limit =
4181 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4182 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4183 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4184 DCHECK((limit - top) == kPointerSize);
4185
4186 // Set up allocation top address and allocation limit registers.
4187 Register top_address = scratch;
4188 // This code stores a temporary value in t9.
4189 Register alloc_limit = t9;
4190 li(top_address, Operand(allocation_top));
4191
4192 if ((flags & RESULT_CONTAINS_TOP) == 0) {
4193 // Load allocation top into result and allocation limit into alloc_limit.
4194 lw(result, MemOperand(top_address));
4195 lw(alloc_limit, MemOperand(top_address, kPointerSize));
4196 } else {
4197 if (emit_debug_code()) {
4198 // Assert that result actually contains top on entry.
4199 lw(alloc_limit, MemOperand(top_address));
4200 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4201 }
4202 // Load allocation limit. Result already contains allocation top.
4203 lw(alloc_limit, MemOperand(top_address, limit - top));
4204 }
4205
4206 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4207 // Align the next allocation. Storing the filler map without checking top is
4208 // safe in new-space because the limit of the heap is aligned there.
4209 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4210 And(result_end, result, Operand(kDoubleAlignmentMask));
4211 Label aligned;
4212 Branch(&aligned, eq, result_end, Operand(zero_reg));
4213 if ((flags & PRETENURE) != 0) {
4214 Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
4215 }
4216 li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4217 sw(result_end, MemOperand(result));
4218 Addu(result, result, Operand(kDoubleSize / 2));
4219 bind(&aligned);
4220 }
4221
4222 // Calculate new top and bail out if new space is exhausted. Use result
4223 // to calculate the new top. Object size may be in words so a shift is
4224 // required to get the number of bytes.
4225 if ((flags & SIZE_IN_WORDS) != 0) {
4226 Lsa(result_end, result, object_size, kPointerSizeLog2);
4227 } else {
4228 Addu(result_end, result, Operand(object_size));
4229 }
4230
4231 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4232
4233 // Update allocation top. result temporarily holds the new top.
4234 if (emit_debug_code()) {
4235 And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
4236 Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
4237 }
4238
4239 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4240 // The top pointer is not updated for allocation folding dominators.
4241 sw(result_end, MemOperand(top_address));
4242 }
4243
4244 // Tag object.
4245 Addu(result, result, Operand(kHeapObjectTag));
4246 }
4247
4248 void MacroAssembler::FastAllocate(int object_size, Register result,
4249 Register scratch1, Register scratch2,
4250 AllocationFlags flags) {
4251 DCHECK(object_size <= kMaxRegularHeapObjectSize);
4252 DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
4253
4254 // Make object size into bytes.
4255 if ((flags & SIZE_IN_WORDS) != 0) {
4256 object_size *= kPointerSize;
4257 }
4258 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
4259
4260 ExternalReference allocation_top =
4261 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4262
4263 // Set up allocation top address and allocation limit registers.
4264 Register top_address = scratch1;
4265 // This code stores a temporary value in t9.
4266 Register result_end = scratch2;
4267 li(top_address, Operand(allocation_top));
4268 lw(result, MemOperand(top_address));
4269
4270 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4271 // Align the next allocation. Storing the filler map without checking top is
4272 // safe in new-space because the limit of the heap is aligned there.
4273 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4274 And(result_end, result, Operand(kDoubleAlignmentMask));
4275 Label aligned;
4276 Branch(&aligned, eq, result_end, Operand(zero_reg));
4277 li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4278 sw(result_end, MemOperand(result));
4279 Addu(result, result, Operand(kDoubleSize / 2));
4280 bind(&aligned);
4281 }
4282
4283 Addu(result_end, result, Operand(object_size));
4284
4285 // The top pointer is not updated for allocation folding dominators.
4286 sw(result_end, MemOperand(top_address));
4287
4288 Addu(result, result, Operand(kHeapObjectTag));
4289 }
4290
4291 void MacroAssembler::FastAllocate(Register object_size, Register result,
4292 Register result_end, Register scratch,
4293 AllocationFlags flags) {
4294 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
4295 // is not specified. Other registers must not overlap.
4296 DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4297 DCHECK(!AreAliased(result_end, result, scratch, t9, at));
4298 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
4299
4300 ExternalReference allocation_top =
4301 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4302
4303 // Set up allocation top address and allocation limit registers.
4304 Register top_address = scratch;
4305 // This code stores a temporary value in t9.
4306 li(top_address, Operand(allocation_top));
4307 lw(result, MemOperand(top_address));
4308
4309 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4310 // Align the next allocation. Storing the filler map without checking top is
4311 // safe in new-space because the limit of the heap is aligned there.
4312 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4313 And(result_end, result, Operand(kDoubleAlignmentMask));
4314 Label aligned;
4315 Branch(&aligned, eq, result_end, Operand(zero_reg));
4316 li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4317 sw(result_end, MemOperand(result));
4318 Addu(result, result, Operand(kDoubleSize / 2));
4319 bind(&aligned);
4320 }
4321
4322 // Calculate new top and bail out if new space is exhausted. Use result
4323 // to calculate the new top. Object size may be in words so a shift is
4324 // required to get the number of bytes.
4325 if ((flags & SIZE_IN_WORDS) != 0) {
4326 Lsa(result_end, result, object_size, kPointerSizeLog2);
4327 } else {
4328 Addu(result_end, result, Operand(object_size));
4329 }
4330
4331 // The top pointer is not updated for allocation folding dominators.
4332 sw(result_end, MemOperand(top_address));
4333
4334 Addu(result, result, Operand(kHeapObjectTag));
4335 }
4336
4337 void MacroAssembler::AllocateTwoByteString(Register result,
4338 Register length,
4339 Register scratch1,
4340 Register scratch2,
4341 Register scratch3,
4342 Label* gc_required) {
4343 // Calculate the number of bytes needed for the characters in the string while
4344 // observing object alignment.
4345 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4346 sll(scratch1, length, 1); // Length in bytes, not chars.
4347 addiu(scratch1, scratch1,
4348 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
4349 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4350
4351 // Allocate two-byte string in new space.
4352 Allocate(scratch1, result, scratch2, scratch3, gc_required,
4353 NO_ALLOCATION_FLAGS);
4354
4355 // Set the map, length and hash field.
4356 InitializeNewString(result,
4357 length,
4358 Heap::kStringMapRootIndex,
4359 scratch1,
4360 scratch2);
4361 }
4362
4363
4364 void MacroAssembler::AllocateOneByteString(Register result, Register length,
4365 Register scratch1, Register scratch2,
4366 Register scratch3,
4367 Label* gc_required) {
4368 // Calculate the number of bytes needed for the characters in the string
4369 // while observing object alignment.
4370 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4371 DCHECK(kCharSize == 1);
4372 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
4373 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4374
4375 // Allocate one-byte string in new space.
4376 Allocate(scratch1, result, scratch2, scratch3, gc_required,
4377 NO_ALLOCATION_FLAGS);
4378
4379 // Set the map, length and hash field.
4380 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
4381 scratch1, scratch2);
4382 }
4383
4384
4385 void MacroAssembler::AllocateTwoByteConsString(Register result,
4386 Register length,
4387 Register scratch1,
4388 Register scratch2,
4389 Label* gc_required) {
4390 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4391 NO_ALLOCATION_FLAGS);
4392 InitializeNewString(result,
4393 length,
4394 Heap::kConsStringMapRootIndex,
4395 scratch1,
4396 scratch2);
4397 }
4398
4399
4400 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
4401 Register scratch1,
4402 Register scratch2,
4403 Label* gc_required) {
4404 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4405 NO_ALLOCATION_FLAGS);
4406
4407 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
4408 scratch1, scratch2);
4409 }
4410
4411
4412 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4413 Register length,
4414 Register scratch1,
4415 Register scratch2,
4416 Label* gc_required) {
4417 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4418 NO_ALLOCATION_FLAGS);
4419
4420 InitializeNewString(result,
4421 length,
4422 Heap::kSlicedStringMapRootIndex,
4423 scratch1,
4424 scratch2);
4425 }
4426
4427
4428 void MacroAssembler::AllocateOneByteSlicedString(Register result,
4429 Register length,
4430 Register scratch1,
4431 Register scratch2,
4432 Label* gc_required) {
4433 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4434 NO_ALLOCATION_FLAGS);
4435
4436 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
4437 scratch1, scratch2);
4438 }
4439
4440
4441 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
4442 Label* not_unique_name) {
4443 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4444 Label succeed;
4445 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4446 Branch(&succeed, eq, at, Operand(zero_reg));
4447 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
4448
4449 bind(&succeed);
4450 }
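// For illustration: internalized strings have both kIsNotStringMask and
// kIsNotInternalizedMask clear in their instance type, so they pass the And
// test above directly; any other instance type is only accepted when it
// equals SYMBOL_TYPE, since internalized strings and symbols are the only
// unique names.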
4451
4452
4453 // Allocates a heap number or jumps to the label if the young space is full and
4454 // a scavenge is needed.
4455 void MacroAssembler::AllocateHeapNumber(Register result,
4456 Register scratch1,
4457 Register scratch2,
4458 Register heap_number_map,
4459 Label* need_gc,
4460 MutableMode mode) {
4461 // Allocate an object in the heap for the heap number and tag it as a heap
4462 // object.
4463 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
4464 NO_ALLOCATION_FLAGS);
4465
4466 Heap::RootListIndex map_index = mode == MUTABLE
4467 ? Heap::kMutableHeapNumberMapRootIndex
4468 : Heap::kHeapNumberMapRootIndex;
4469 AssertIsRoot(heap_number_map, map_index);
4470
4471 // Store heap number map in the allocated object.
4472 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
4473 }
4474
4475
4476 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4477 FPURegister value,
4478 Register scratch1,
4479 Register scratch2,
4480 Label* gc_required) {
4481 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4482 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4483 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4484 }
4485
4486
4487 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4488 Register value, Register scratch1,
4489 Register scratch2, Label* gc_required) {
4490 DCHECK(!result.is(constructor));
4491 DCHECK(!result.is(scratch1));
4492 DCHECK(!result.is(scratch2));
4493 DCHECK(!result.is(value));
4494
4495 // Allocate JSValue in new space.
4496 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
4497 NO_ALLOCATION_FLAGS);
4498
4499 // Initialize the JSValue.
4500 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4501 sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4502 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4503 sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4504 sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4505 sw(value, FieldMemOperand(result, JSValue::kValueOffset));
4506 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
4507 }
4508
4509 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4510 Register end_address,
4511 Register filler) {
4512 Label loop, entry;
4513 Branch(&entry);
4514 bind(&loop);
4515 sw(filler, MemOperand(current_address));
4516 Addu(current_address, current_address, kPointerSize);
4517 bind(&entry);
4518 Branch(&loop, ult, current_address, Operand(end_address));
4519 }
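// Rough C equivalent of the generated loop (the test runs first, so nothing
// is written when current_address >= end_address on entry):
//   while (current_address < end_address) {
//     *current_address++ = filler;
//   }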
4520
4521 void MacroAssembler::CheckFastObjectElements(Register map,
4522 Register scratch,
4523 Label* fail) {
4524 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4525 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4526 STATIC_ASSERT(FAST_ELEMENTS == 2);
4527 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4528 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4529 Branch(fail, ls, scratch,
4530 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4531 Branch(fail, hi, scratch,
4532 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4533 }
4534
4535
4536 void MacroAssembler::CheckFastSmiElements(Register map,
4537 Register scratch,
4538 Label* fail) {
4539 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4540 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4541 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4542 Branch(fail, hi, scratch,
4543 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4544 }
4545
4546
4547 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
4548 Register key_reg,
4549 Register elements_reg,
4550 Register scratch1,
4551 Register scratch2,
4552 Register scratch3,
4553 Label* fail,
4554 int elements_offset) {
4555 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
4556 scratch3));
4557 Label smi_value, done;
4558
4559 // Handle smi values specially.
4560 JumpIfSmi(value_reg, &smi_value);
4561
4562 // Ensure that the object is a heap number.
4563 CheckMap(value_reg,
4564 scratch1,
4565 Heap::kHeapNumberMapRootIndex,
4566 fail,
4567 DONT_DO_SMI_CHECK);
4568
4569 // Double value: turn a potential sNaN into a qNaN.
4570 DoubleRegister double_result = f0;
4571 DoubleRegister double_scratch = f2;
4572
4573 ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
4574 Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
4575 FPUCanonicalizeNaN(double_result, double_result);
4576
4577 bind(&smi_value);
4578 Register untagged_value = scratch2;
4579 SmiUntag(untagged_value, value_reg);
4580 mtc1(untagged_value, double_scratch);
4581 cvt_d_w(double_result, double_scratch);
4582
4583 bind(&done);
4584 Addu(scratch1, elements_reg,
4585 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
4586 elements_offset));
4587 Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
4588 // scratch1 now holds the effective address of the double element.
4589 sdc1(double_result, MemOperand(scratch1, 0));
4590 }
4591
4592 void MacroAssembler::CompareMapAndBranch(Register obj,
4593 Register scratch,
4594 Handle<Map> map,
4595 Label* early_success,
4596 Condition cond,
4597 Label* branch_to) {
4598 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4599 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4600 }
4601
4602
4603 void MacroAssembler::CompareMapAndBranch(Register obj_map,
4604 Handle<Map> map,
4605 Label* early_success,
4606 Condition cond,
4607 Label* branch_to) {
4608 Branch(branch_to, cond, obj_map, Operand(map));
4609 }
4610
4611
4612 void MacroAssembler::CheckMap(Register obj,
4613 Register scratch,
4614 Handle<Map> map,
4615 Label* fail,
4616 SmiCheckType smi_check_type) {
4617 if (smi_check_type == DO_SMI_CHECK) {
4618 JumpIfSmi(obj, fail);
4619 }
4620 Label success;
4621 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
4622 bind(&success);
4623 }
4624
4625
4626 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4627 Register scratch2, Handle<WeakCell> cell,
4628 Handle<Code> success,
4629 SmiCheckType smi_check_type) {
4630 Label fail;
4631 if (smi_check_type == DO_SMI_CHECK) {
4632 JumpIfSmi(obj, &fail);
4633 }
4634 lw(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4635 GetWeakValue(scratch2, cell);
4636 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
4637 bind(&fail);
4638 }
4639
4640
4641 void MacroAssembler::CheckMap(Register obj,
4642 Register scratch,
4643 Heap::RootListIndex index,
4644 Label* fail,
4645 SmiCheckType smi_check_type) {
4646 if (smi_check_type == DO_SMI_CHECK) {
4647 JumpIfSmi(obj, fail);
4648 }
4649 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4650 LoadRoot(at, index);
4651 Branch(fail, ne, scratch, Operand(at));
4652 }
4653
4654 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4655 const DoubleRegister src) {
4656 sub_d(dst, src, kDoubleRegZero);
4657 }
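// For illustration: on MIPS an arithmetic FPU operation on a signaling NaN
// yields a quiet NaN, so subtracting +0.0 leaves ordinary values (and qNaNs)
// unchanged while converting any sNaN input into a qNaN.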
4658
4659 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4660 li(value, Operand(cell));
4661 lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
4662 }
4663
4664
4665 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4666 Label* miss) {
4667 GetWeakValue(value, cell);
4668 JumpIfSmi(value, miss);
4669 }
4670
4671
4672 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
4673 if (IsMipsSoftFloatABI) {
4674 if (kArchEndian == kLittle) {
4675 Move(dst, v0, v1);
4676 } else {
4677 Move(dst, v1, v0);
4678 }
4679 } else {
4680 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
4681 }
4682 }
4683
4684
4685 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
4686 if (IsMipsSoftFloatABI) {
4687 if (kArchEndian == kLittle) {
4688 Move(dst, a0, a1);
4689 } else {
4690 Move(dst, a1, a0);
4691 }
4692 } else {
4693 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
4694 }
4695 }
4696
4697
4698 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4699 if (!IsMipsSoftFloatABI) {
4700 Move(f12, src);
4701 } else {
4702 if (kArchEndian == kLittle) {
4703 Move(a0, a1, src);
4704 } else {
4705 Move(a1, a0, src);
4706 }
4707 }
4708 }
4709
4710
4711 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4712 if (!IsMipsSoftFloatABI) {
4713 Move(f0, src);
4714 } else {
4715 if (kArchEndian == kLittle) {
4716 Move(v0, v1, src);
4717 } else {
4718 Move(v1, v0, src);
4719 }
4720 }
4721 }
4722
4723
4724 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4725 DoubleRegister src2) {
4726 if (!IsMipsSoftFloatABI) {
4727 if (src2.is(f12)) {
4728 DCHECK(!src1.is(f14));
4729 Move(f14, src2);
4730 Move(f12, src1);
4731 } else {
4732 Move(f12, src1);
4733 Move(f14, src2);
4734 }
4735 } else {
4736 if (kArchEndian == kLittle) {
4737 Move(a0, a1, src1);
4738 Move(a2, a3, src2);
4739 } else {
4740 Move(a1, a0, src1);
4741 Move(a3, a2, src2);
4742 }
4743 }
4744 }
4745
4746
4747 // -----------------------------------------------------------------------------
4748 // JavaScript invokes.
4749
4750 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
4751 Register caller_args_count_reg,
4752 Register scratch0, Register scratch1) {
4753 #if DEBUG
4754 if (callee_args_count.is_reg()) {
4755 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
4756 scratch1));
4757 } else {
4758 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
4759 }
4760 #endif
4761
4762 // Calculate the end of the destination area where we will put the arguments
4763 // after we drop the current frame. We add kPointerSize to count the receiver
4764 // argument, which is not included in the formal parameter count.
4765 Register dst_reg = scratch0;
4766 Lsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
4767 Addu(dst_reg, dst_reg,
4768 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
4769
4770 Register src_reg = caller_args_count_reg;
4771 // Calculate the end of source area. +kPointerSize is for the receiver.
4772 if (callee_args_count.is_reg()) {
4773 Lsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
4774 Addu(src_reg, src_reg, Operand(kPointerSize));
4775 } else {
4776 Addu(src_reg, sp,
4777 Operand((callee_args_count.immediate() + 1) * kPointerSize));
4778 }
4779
4780 if (FLAG_debug_code) {
4781 Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
4782 }
4783
4784 // Restore caller's frame pointer and return address now as they will be
4785 // overwritten by the copying loop.
4786 lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
4787 lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4788
4789 // Now copy callee arguments to the caller frame going backwards to avoid
4790 // callee arguments corruption (source and destination areas could overlap).
4791
4792 // Both src_reg and dst_reg are pointing to the word after the one to copy,
4793 // so they must be pre-decremented in the loop.
4794 Register tmp_reg = scratch1;
4795 Label loop, entry;
4796 Branch(&entry);
4797 bind(&loop);
4798 Subu(src_reg, src_reg, Operand(kPointerSize));
4799 Subu(dst_reg, dst_reg, Operand(kPointerSize));
4800 lw(tmp_reg, MemOperand(src_reg));
4801 sw(tmp_reg, MemOperand(dst_reg));
4802 bind(&entry);
4803 Branch(&loop, ne, sp, Operand(src_reg));
4804
4805 // Leave current frame.
4806 mov(sp, dst_reg);
4807 }
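// Sketch of the overall effect (layout simplified): the callee arguments and
// receiver currently on top of the stack are copied backwards (highest
// address first) over the caller's argument area above the current frame.
// ra and fp are restored before the copy because it may overwrite the frame,
// and sp is finally moved to dst_reg so the tail-called function finds the
// arguments where its own caller would have placed them.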
4808
4809 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4810 const ParameterCount& actual,
4811 Label* done,
4812 bool* definitely_mismatches,
4813 InvokeFlag flag,
4814 const CallWrapper& call_wrapper) {
4815 bool definitely_matches = false;
4816 *definitely_mismatches = false;
4817 Label regular_invoke;
4818
4819 // Check whether the expected and actual arguments count match. If not,
4820 // setup registers according to contract with ArgumentsAdaptorTrampoline:
4821 // a0: actual arguments count
4822 // a1: function (passed through to callee)
4823 // a2: expected arguments count
4824
4825 // The code below is made a lot easier because the calling code already sets
4826 // up actual and expected registers according to the contract if values are
4827 // passed in registers.
4828 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4829 DCHECK(expected.is_immediate() || expected.reg().is(a2));
4830
4831 if (expected.is_immediate()) {
4832 DCHECK(actual.is_immediate());
4833 li(a0, Operand(actual.immediate()));
4834 if (expected.immediate() == actual.immediate()) {
4835 definitely_matches = true;
4836 } else {
4837 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4838 if (expected.immediate() == sentinel) {
4839 // Don't worry about adapting arguments for builtins that
4840 // don't want that done. Skip the adaptation code by making it look
4841 // like we have a match between expected and actual number of
4842 // arguments.
4843 definitely_matches = true;
4844 } else {
4845 *definitely_mismatches = true;
4846 li(a2, Operand(expected.immediate()));
4847 }
4848 }
4849 } else if (actual.is_immediate()) {
4850 li(a0, Operand(actual.immediate()));
4851 Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
4852 } else {
4853 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4854 }
4855
4856 if (!definitely_matches) {
4857 Handle<Code> adaptor =
4858 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4859 if (flag == CALL_FUNCTION) {
4860 call_wrapper.BeforeCall(CallSize(adaptor));
4861 Call(adaptor);
4862 call_wrapper.AfterCall();
4863 if (!*definitely_mismatches) {
4864 Branch(done);
4865 }
4866 } else {
4867 Jump(adaptor, RelocInfo::CODE_TARGET);
4868 }
4869 bind(&regular_invoke);
4870 }
4871 }
4872
4873
4874 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
4875 const ParameterCount& expected,
4876 const ParameterCount& actual) {
4877 Label skip_flooding;
4878 ExternalReference last_step_action =
4879 ExternalReference::debug_last_step_action_address(isolate());
4880 STATIC_ASSERT(StepFrame > StepIn);
4881 li(t0, Operand(last_step_action));
4882 lb(t0, MemOperand(t0));
4883 Branch(&skip_flooding, lt, t0, Operand(StepIn));
4884 {
4885 FrameScope frame(this,
4886 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4887 if (expected.is_reg()) {
4888 SmiTag(expected.reg());
4889 Push(expected.reg());
4890 }
4891 if (actual.is_reg()) {
4892 SmiTag(actual.reg());
4893 Push(actual.reg());
4894 }
4895 if (new_target.is_valid()) {
4896 Push(new_target);
4897 }
4898 Push(fun);
4899 Push(fun);
4900 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
4901 Pop(fun);
4902 if (new_target.is_valid()) {
4903 Pop(new_target);
4904 }
4905 if (actual.is_reg()) {
4906 Pop(actual.reg());
4907 SmiUntag(actual.reg());
4908 }
4909 if (expected.is_reg()) {
4910 Pop(expected.reg());
4911 SmiUntag(expected.reg());
4912 }
4913 }
4914 bind(&skip_flooding);
4915 }
4916
4917
4918 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4919 const ParameterCount& expected,
4920 const ParameterCount& actual,
4921 InvokeFlag flag,
4922 const CallWrapper& call_wrapper) {
4923 // You can't call a function without a valid frame.
4924 DCHECK(flag == JUMP_FUNCTION || has_frame());
4925 DCHECK(function.is(a1));
4926 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
4927
4928 if (call_wrapper.NeedsDebugStepCheck()) {
4929 FloodFunctionIfStepping(function, new_target, expected, actual);
4930 }
4931
4932 // Clear the new.target register if not given.
4933 if (!new_target.is_valid()) {
4934 LoadRoot(a3, Heap::kUndefinedValueRootIndex);
4935 }
4936
4937 Label done;
4938 bool definitely_mismatches = false;
4939 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
4940 call_wrapper);
4941 if (!definitely_mismatches) {
4942 // We call indirectly through the code field in the function to
4943 // allow recompilation to take effect without changing any of the
4944 // call sites.
4945 Register code = t0;
4946 lw(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4947 if (flag == CALL_FUNCTION) {
4948 call_wrapper.BeforeCall(CallSize(code));
4949 Call(code);
4950 call_wrapper.AfterCall();
4951 } else {
4952 DCHECK(flag == JUMP_FUNCTION);
4953 Jump(code);
4954 }
4955 // Continue here if InvokePrologue has already handled the invocation (via
4956 // the arguments adaptor) because of mismatched parameter counts.
4957 bind(&done);
4958 }
4959 }
4960
4961
4962 void MacroAssembler::InvokeFunction(Register function,
4963 Register new_target,
4964 const ParameterCount& actual,
4965 InvokeFlag flag,
4966 const CallWrapper& call_wrapper) {
4967 // You can't call a function without a valid frame.
4968 DCHECK(flag == JUMP_FUNCTION || has_frame());
4969
4970 // Contract with called JS functions requires that function is passed in a1.
4971 DCHECK(function.is(a1));
4972 Register expected_reg = a2;
4973 Register temp_reg = t0;
4974
4975 lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4976 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4977 lw(expected_reg,
4978 FieldMemOperand(temp_reg,
4979 SharedFunctionInfo::kFormalParameterCountOffset));
4980 sra(expected_reg, expected_reg, kSmiTagSize);
4981
4982 ParameterCount expected(expected_reg);
4983 InvokeFunctionCode(function, new_target, expected, actual, flag,
4984 call_wrapper);
4985 }
4986
4987
4988 void MacroAssembler::InvokeFunction(Register function,
4989 const ParameterCount& expected,
4990 const ParameterCount& actual,
4991 InvokeFlag flag,
4992 const CallWrapper& call_wrapper) {
4993 // You can't call a function without a valid frame.
4994 DCHECK(flag == JUMP_FUNCTION || has_frame());
4995
4996 // Contract with called JS functions requires that function is passed in a1.
4997 DCHECK(function.is(a1));
4998
4999 // Get the function and setup the context.
5000 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5001
5002 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
5003 }
5004
5005
5006 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
5007 const ParameterCount& expected,
5008 const ParameterCount& actual,
5009 InvokeFlag flag,
5010 const CallWrapper& call_wrapper) {
5011 li(a1, function);
5012 InvokeFunction(a1, expected, actual, flag, call_wrapper);
5013 }
5014
5015
5016 void MacroAssembler::IsObjectJSStringType(Register object,
5017 Register scratch,
5018 Label* fail) {
5019 DCHECK(kNotStringTag != 0);
5020
5021 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5022 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5023 And(scratch, scratch, Operand(kIsNotStringMask));
5024 Branch(fail, ne, scratch, Operand(zero_reg));
5025 }
5026
5027
5028 void MacroAssembler::IsObjectNameType(Register object,
5029 Register scratch,
5030 Label* fail) {
5031 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5032 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5033 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
5034 }
5035
5036
5037 // ---------------------------------------------------------------------------
5038 // Support functions.
5039
5040
5041 void MacroAssembler::GetMapConstructor(Register result, Register map,
5042 Register temp, Register temp2) {
5043 Label done, loop;
5044 lw(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
5045 bind(&loop);
5046 JumpIfSmi(result, &done);
5047 GetObjectType(result, temp, temp2);
5048 Branch(&done, ne, temp2, Operand(MAP_TYPE));
5049 lw(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
5050 Branch(&loop);
5051 bind(&done);
5052 }
5053
5054
5055 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
5056 Register scratch, Label* miss) {
5057 // Get the prototype or initial map from the function.
5058 lw(result,
5059 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5060
5061 // If the prototype or initial map is the hole, don't return it and
5062 // simply miss the cache instead. This will allow us to allocate a
5063 // prototype object on-demand in the runtime system.
5064 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
5065 Branch(miss, eq, result, Operand(t8));
5066
5067 // If the function does not have an initial map, we're done.
5068 Label done;
5069 GetObjectType(result, scratch, scratch);
5070 Branch(&done, ne, scratch, Operand(MAP_TYPE));
5071
5072 // Get the prototype from the initial map.
5073 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
5074
5075 // All done.
5076 bind(&done);
5077 }
5078
5079
5080 void MacroAssembler::GetObjectType(Register object,
5081 Register map,
5082 Register type_reg) {
5083 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
5084 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5085 }
5086
5087
5088 // -----------------------------------------------------------------------------
5089 // Runtime calls.
5090
5091 void MacroAssembler::CallStub(CodeStub* stub,
5092 TypeFeedbackId ast_id,
5093 Condition cond,
5094 Register r1,
5095 const Operand& r2,
5096 BranchDelaySlot bd) {
5097 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
5098 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
5099 cond, r1, r2, bd);
5100 }
5101
5102
5103 void MacroAssembler::TailCallStub(CodeStub* stub,
5104 Condition cond,
5105 Register r1,
5106 const Operand& r2,
5107 BranchDelaySlot bd) {
5108 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
5109 }
5110
5111
5112 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
5113 return has_frame_ || !stub->SometimesSetsUpAFrame();
5114 }
5115
5116 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
5117 FPURegister result,
5118 Register scratch1,
5119 Register scratch2,
5120 Register heap_number_map,
5121 Label* not_number,
5122 ObjectToDoubleFlags flags) {
5123 Label done;
5124 if ((flags & OBJECT_NOT_SMI) == 0) {
5125 Label not_smi;
5126 JumpIfNotSmi(object, &not_smi);
5127 // Remove smi tag and convert to double.
5128 sra(scratch1, object, kSmiTagSize);
5129 mtc1(scratch1, result);
5130 cvt_d_w(result, result);
5131 Branch(&done);
5132 bind(&not_smi);
5133 }
5134 // Check for heap number and load double value from it.
5135 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
5136 Branch(not_number, ne, scratch1, Operand(heap_number_map));
5137
5138 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
5139 // If exponent is all ones the number is either a NaN or +/-Infinity.
5140 Register exponent = scratch1;
5141 Register mask_reg = scratch2;
5142 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
5143 li(mask_reg, HeapNumber::kExponentMask);
5144
5145 And(exponent, exponent, mask_reg);
5146 Branch(not_number, eq, exponent, Operand(mask_reg));
5147 }
5148 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
5149 bind(&done);
5150 }
5151
5152
5153 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
5154 FPURegister value,
5155 Register scratch1) {
5156 sra(scratch1, smi, kSmiTagSize);
5157 mtc1(scratch1, value);
5158 cvt_d_w(value, value);
5159 }
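// Example: the smi 2 is encoded as 4 (the value shifted left by kSmiTagSize
// with a zero tag), so the sra above recovers 2, and mtc1/cvt_d_w turn it
// into the double 2.0.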
5160
5161
5162 static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
5163 Label* overflow_label,
5164 Label* no_overflow_label) {
5165 DCHECK(overflow_label || no_overflow_label);
5166 if (!overflow_label) {
5167 DCHECK(no_overflow_label);
5168 masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
5169 } else {
5170 masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
5171 if (no_overflow_label) masm->Branch(no_overflow_label);
5172 }
5173 }
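// For illustration: callers arrange for overflow_dst to have its sign bit set
// exactly when the preceding addition or subtraction overflowed, so a single
// signed comparison against zero here is enough to dispatch to whichever of
// the two labels was supplied.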
5174
5175
5176 void MacroAssembler::AddBranchOvf(Register dst, Register left,
5177 const Operand& right, Label* overflow_label,
5178 Label* no_overflow_label, Register scratch) {
5179 if (right.is_reg()) {
5180 AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5181 scratch);
5182 } else {
5183 if (IsMipsArchVariant(kMips32r6)) {
5184 Register right_reg = t9;
5185 DCHECK(!left.is(right_reg));
5186 li(right_reg, Operand(right));
5187 AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
5188 } else {
5189 Register overflow_dst = t9;
5190 DCHECK(!dst.is(scratch));
5191 DCHECK(!dst.is(overflow_dst));
5192 DCHECK(!scratch.is(overflow_dst));
5193 DCHECK(!left.is(overflow_dst));
5194 if (dst.is(left)) {
5195 mov(scratch, left); // Preserve left.
5196 Addu(dst, left, right.immediate()); // Left is overwritten.
5197 xor_(scratch, dst, scratch); // Original left.
5198 // Load right since xori takes uint16 as immediate.
5199 Addu(overflow_dst, zero_reg, right);
5200 xor_(overflow_dst, dst, overflow_dst);
5201 and_(overflow_dst, overflow_dst, scratch);
5202 } else {
5203 Addu(dst, left, right.immediate());
5204 xor_(overflow_dst, dst, left);
5205 // Load right since xori takes uint16 as immediate.
5206 Addu(scratch, zero_reg, right);
5207 xor_(scratch, dst, scratch);
5208 and_(overflow_dst, scratch, overflow_dst);
5209 }
5210 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5211 }
5212 }
5213 }
5214
5215
5216 void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
5217 Label* overflow_label,
5218 Label* no_overflow_label, Register scratch) {
5219 if (IsMipsArchVariant(kMips32r6)) {
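// On r6, bovc/bnvc branch directly on whether the 32-bit signed addition of
// their two operands overflows, so the generic xor/and sequence is not
// needed here. The Move calls only ensure both operands are still available
// in registers distinct from dst when the branch is emitted after the addu.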
5220 if (!overflow_label) {
5221 DCHECK(no_overflow_label);
5222 DCHECK(!dst.is(scratch));
5223 Register left_reg = left.is(dst) ? scratch : left;
5224 Register right_reg = right.is(dst) ? t9 : right;
5225 DCHECK(!dst.is(left_reg));
5226 DCHECK(!dst.is(right_reg));
5227 Move(left_reg, left);
5228 Move(right_reg, right);
5229 addu(dst, left, right);
5230 Bnvc(left_reg, right_reg, no_overflow_label);
5231 } else {
5232 Bovc(left, right, overflow_label);
5233 addu(dst, left, right);
5234 if (no_overflow_label) bc(no_overflow_label);
5235 }
5236 } else {
5237 Register overflow_dst = t9;
5238 DCHECK(!dst.is(scratch));
5239 DCHECK(!dst.is(overflow_dst));
5240 DCHECK(!scratch.is(overflow_dst));
5241 DCHECK(!left.is(overflow_dst));
5242 DCHECK(!right.is(overflow_dst));
5243 DCHECK(!left.is(scratch));
5244 DCHECK(!right.is(scratch));
5245
5246 if (left.is(right) && dst.is(left)) {
5247 mov(overflow_dst, right);
5248 right = overflow_dst;
5249 }
5250
5251 if (dst.is(left)) {
5252 mov(scratch, left); // Preserve left.
5253 addu(dst, left, right); // Left is overwritten.
5254 xor_(scratch, dst, scratch); // Original left.
5255 xor_(overflow_dst, dst, right);
5256 and_(overflow_dst, overflow_dst, scratch);
5257 } else if (dst.is(right)) {
5258 mov(scratch, right); // Preserve right.
5259 addu(dst, left, right); // Right is overwritten.
5260 xor_(scratch, dst, scratch); // Original right.
5261 xor_(overflow_dst, dst, left);
5262 and_(overflow_dst, overflow_dst, scratch);
5263 } else {
5264 addu(dst, left, right);
5265 xor_(overflow_dst, dst, left);
5266 xor_(scratch, dst, right);
5267 and_(overflow_dst, scratch, overflow_dst);
5268 }
5269 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5270 }
5271 }
5272
5273
5274 void MacroAssembler::SubBranchOvf(Register dst, Register left,
5275 const Operand& right, Label* overflow_label,
5276 Label* no_overflow_label, Register scratch) {
5277 DCHECK(overflow_label || no_overflow_label);
5278 if (right.is_reg()) {
5279 SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5280 scratch);
5281 } else {
5282 Register overflow_dst = t9;
5283 DCHECK(!dst.is(scratch));
5284 DCHECK(!dst.is(overflow_dst));
5285 DCHECK(!scratch.is(overflow_dst));
5286 DCHECK(!left.is(overflow_dst));
5287 DCHECK(!left.is(scratch));
5288 if (dst.is(left)) {
5289 mov(scratch, left); // Preserve left.
5290 Subu(dst, left, right.immediate()); // Left is overwritten.
5291 // Load right since xori takes uint16 as immediate.
5292 Addu(overflow_dst, zero_reg, right);
5293 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5294 xor_(scratch, dst, scratch); // scratch is original left.
5295 and_(overflow_dst, scratch, overflow_dst);
5296 } else {
5297 Subu(dst, left, right);
5298 xor_(overflow_dst, dst, left);
5299 // Load right since xori takes uint16 as immediate.
5300 Addu(scratch, zero_reg, right);
5301 xor_(scratch, left, scratch);
5302 and_(overflow_dst, scratch, overflow_dst);
5303 }
5304 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5305 }
5306 }
5307
5308
5309 void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
5310 Label* overflow_label,
5311 Label* no_overflow_label, Register scratch) {
5312 DCHECK(overflow_label || no_overflow_label);
5313 Register overflow_dst = t9;
5314 DCHECK(!dst.is(scratch));
5315 DCHECK(!dst.is(overflow_dst));
5316 DCHECK(!scratch.is(overflow_dst));
5317 DCHECK(!overflow_dst.is(left));
5318 DCHECK(!overflow_dst.is(right));
5319 DCHECK(!scratch.is(left));
5320 DCHECK(!scratch.is(right));
5321
5322 // This happens with some crankshaft code. Since Subu works fine if
5323 // left == right, let's not make that restriction here.
5324 if (left.is(right)) {
5325 mov(dst, zero_reg);
5326 if (no_overflow_label) {
5327 Branch(no_overflow_label);
5328 }
5329 }
5330
5331 if (dst.is(left)) {
5332 mov(scratch, left); // Preserve left.
5333 subu(dst, left, right); // Left is overwritten.
5334 xor_(overflow_dst, dst, scratch); // scratch is original left.
5335 xor_(scratch, scratch, right); // scratch is original left.
5336 and_(overflow_dst, scratch, overflow_dst);
5337 } else if (dst.is(right)) {
5338 mov(scratch, right); // Preserve right.
5339 subu(dst, left, right); // Right is overwritten.
5340 xor_(overflow_dst, dst, left);
5341 xor_(scratch, left, scratch); // Original right.
5342 and_(overflow_dst, scratch, overflow_dst);
5343 } else {
5344 subu(dst, left, right);
5345 xor_(overflow_dst, dst, left);
5346 xor_(scratch, left, right);
5347 and_(overflow_dst, scratch, overflow_dst);
5348 }
5349 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5350 }
5351
5352 static inline void BranchOvfHelperMult(MacroAssembler* masm,
5353 Register overflow_dst,
5354 Label* overflow_label,
5355 Label* no_overflow_label) {
5356 DCHECK(overflow_label || no_overflow_label);
5357 if (!overflow_label) {
5358 DCHECK(no_overflow_label);
5359 masm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
5360 } else {
5361 masm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
5362 if (no_overflow_label) masm->Branch(no_overflow_label);
5363 }
5364 }
5365
5366 void MacroAssembler::MulBranchOvf(Register dst, Register left,
5367 const Operand& right, Label* overflow_label,
5368 Label* no_overflow_label, Register scratch) {
5369 DCHECK(overflow_label || no_overflow_label);
5370 if (right.is_reg()) {
5371 MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5372 scratch);
5373 } else {
5374 Register overflow_dst = t9;
5375 DCHECK(!dst.is(scratch));
5376 DCHECK(!dst.is(overflow_dst));
5377 DCHECK(!scratch.is(overflow_dst));
5378 DCHECK(!left.is(overflow_dst));
5379 DCHECK(!left.is(scratch));
5380
5381 Mul(overflow_dst, dst, left, right.immediate());
5382 sra(scratch, dst, 31);
5383 xor_(overflow_dst, overflow_dst, scratch);
5384
5385 BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
5386 }
5387 }
5388
5389 void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
5390 Label* overflow_label,
5391 Label* no_overflow_label, Register scratch) {
5392 DCHECK(overflow_label || no_overflow_label);
5393 Register overflow_dst = t9;
5394 DCHECK(!dst.is(scratch));
5395 DCHECK(!dst.is(overflow_dst));
5396 DCHECK(!scratch.is(overflow_dst));
5397 DCHECK(!overflow_dst.is(left));
5398 DCHECK(!overflow_dst.is(right));
5399 DCHECK(!scratch.is(left));
5400 DCHECK(!scratch.is(right));
5401
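// Mul with two destination registers produces the full 64-bit product:
// overflow_dst receives the high word and dst the low word. The product fits
// in 32 bits exactly when the high word equals the sign extension of the low
// word (dst >> 31), so the xor below is non-zero iff the multiply
// overflowed, which is what BranchOvfHelperMult tests.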
5402 if (IsMipsArchVariant(kMips32r6) && dst.is(right)) {
5403 mov(scratch, right);
5404 Mul(overflow_dst, dst, left, scratch);
5405 sra(scratch, dst, 31);
5406 xor_(overflow_dst, overflow_dst, scratch);
5407 } else {
5408 Mul(overflow_dst, dst, left, right);
5409 sra(scratch, dst, 31);
5410 xor_(overflow_dst, overflow_dst, scratch);
5411 }
5412
5413 BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
5414 }
5415
5416 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
5417 SaveFPRegsMode save_doubles,
5418 BranchDelaySlot bd) {
5419 // All parameters are on the stack. v0 has the return value after call.
5420
5421 // If the expected number of arguments of the runtime function is
5422 // constant, we check that the actual number of arguments matches the
5423 // expectation.
5424 CHECK(f->nargs < 0 || f->nargs == num_arguments);
5425
5426 // TODO(1236192): Most runtime routines don't need the number of
5427 // arguments passed in because it is constant. At some point we
5428 // should remove this need and make the runtime routine entry code
5429 // smarter.
5430 PrepareCEntryArgs(num_arguments);
5431 PrepareCEntryFunction(ExternalReference(f, isolate()));
5432 CEntryStub stub(isolate(), 1, save_doubles);
5433 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5434 }
5435
5436
5437 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
5438 int num_arguments,
5439 BranchDelaySlot bd) {
5440 PrepareCEntryArgs(num_arguments);
5441 PrepareCEntryFunction(ext);
5442
5443 CEntryStub stub(isolate(), 1);
5444 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5445 }
5446
5447
5448 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
5449 const Runtime::Function* function = Runtime::FunctionForId(fid);
5450 DCHECK_EQ(1, function->result_size);
5451 if (function->nargs >= 0) {
5452 PrepareCEntryArgs(function->nargs);
5453 }
5454 JumpToExternalReference(ExternalReference(fid, isolate()));
5455 }
5456
5457 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5458 BranchDelaySlot bd,
5459 bool builtin_exit_frame) {
5460 PrepareCEntryFunction(builtin);
5461 CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
5462 builtin_exit_frame);
5463 Jump(stub.GetCode(),
5464 RelocInfo::CODE_TARGET,
5465 al,
5466 zero_reg,
5467 Operand(zero_reg),
5468 bd);
5469 }
5470
5471 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
5472 Register scratch1, Register scratch2) {
5473 if (FLAG_native_code_counters && counter->Enabled()) {
5474 li(scratch1, Operand(value));
5475 li(scratch2, Operand(ExternalReference(counter)));
5476 sw(scratch1, MemOperand(scratch2));
5477 }
5478 }
5479
5480
5481 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
5482 Register scratch1, Register scratch2) {
5483 DCHECK(value > 0);
5484 if (FLAG_native_code_counters && counter->Enabled()) {
5485 li(scratch2, Operand(ExternalReference(counter)));
5486 lw(scratch1, MemOperand(scratch2));
5487 Addu(scratch1, scratch1, Operand(value));
5488 sw(scratch1, MemOperand(scratch2));
5489 }
5490 }
5491
5492
5493 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5494 Register scratch1, Register scratch2) {
5495 DCHECK(value > 0);
5496 if (FLAG_native_code_counters && counter->Enabled()) {
5497 li(scratch2, Operand(ExternalReference(counter)));
5498 lw(scratch1, MemOperand(scratch2));
5499 Subu(scratch1, scratch1, Operand(value));
5500 sw(scratch1, MemOperand(scratch2));
5501 }
5502 }
5503
5504
5505 // -----------------------------------------------------------------------------
5506 // Debugging.
5507
5508 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
5509 Register rs, Operand rt) {
5510 if (emit_debug_code())
5511 Check(cc, reason, rs, rt);
5512 }
5513
5514
5515 void MacroAssembler::AssertFastElements(Register elements) {
5516 if (emit_debug_code()) {
5517 DCHECK(!elements.is(at));
5518 Label ok;
5519 push(elements);
5520 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
5521 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
5522 Branch(&ok, eq, elements, Operand(at));
5523 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
5524 Branch(&ok, eq, elements, Operand(at));
5525 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
5526 Branch(&ok, eq, elements, Operand(at));
5527 Abort(kJSObjectWithFastElementsMapHasSlowElements);
5528 bind(&ok);
5529 pop(elements);
5530 }
5531 }
5532
5533
5534 void MacroAssembler::Check(Condition cc, BailoutReason reason,
5535 Register rs, Operand rt) {
5536 Label L;
5537 Branch(&L, cc, rs, rt);
5538 Abort(reason);
5539 // Will not return here.
5540 bind(&L);
5541 }
5542
5543
5544 void MacroAssembler::Abort(BailoutReason reason) {
5545 Label abort_start;
5546 bind(&abort_start);
5547 #ifdef DEBUG
5548 const char* msg = GetBailoutReason(reason);
5549 if (msg != NULL) {
5550 RecordComment("Abort message: ");
5551 RecordComment(msg);
5552 }
5553
5554 if (FLAG_trap_on_abort) {
5555 stop(msg);
5556 return;
5557 }
5558 #endif
5559
5560 // Check if Abort() has already been initialized.
5561 DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
5562
5563 Move(a0, Smi::FromInt(static_cast<int>(reason)));
5564
5565 // Disable stub call restrictions to always allow calls to abort.
5566 if (!has_frame_) {
5567 // We don't actually want to generate a pile of code for this, so just
5568 // claim there is a stack frame, without generating one.
5569 FrameScope scope(this, StackFrame::NONE);
5570 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
5571 } else {
5572 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
5573 }
5574 // Will not return here.
5575 if (is_trampoline_pool_blocked()) {
5576 // If the calling code cares about the exact number of
5577 // instructions generated, we insert padding here to keep the size
5578 // of the Abort macro constant.
5579 // Currently in debug mode with debug_code enabled the number of
5580 // generated instructions is 10, so we use this as a maximum value.
5581 static const int kExpectedAbortInstructions = 10;
5582 int abort_instructions = InstructionsGeneratedSince(&abort_start);
5583 DCHECK(abort_instructions <= kExpectedAbortInstructions);
5584 while (abort_instructions++ < kExpectedAbortInstructions) {
5585 nop();
5586 }
5587 }
5588 }
5589
5590
5591 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5592 if (context_chain_length > 0) {
5593 // Move up the chain of contexts to the context containing the slot.
5594 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5595 for (int i = 1; i < context_chain_length; i++) {
5596 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5597 }
5598 } else {
5599 // Slot is in the current function context. Move it into the
5600 // destination register in case we store into it (the write barrier
5601 // cannot be allowed to destroy the context in cp).
5602 Move(dst, cp);
5603 }
5604 }
5605
5606
5607 void MacroAssembler::LoadTransitionedArrayMapConditional(
5608 ElementsKind expected_kind,
5609 ElementsKind transitioned_kind,
5610 Register map_in_out,
5611 Register scratch,
5612 Label* no_map_match) {
5613 DCHECK(IsFastElementsKind(expected_kind));
5614 DCHECK(IsFastElementsKind(transitioned_kind));
5615
5616 // Check that the function's map is the same as the expected cached map.
5617 lw(scratch, NativeContextMemOperand());
5618 lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
5619 Branch(no_map_match, ne, map_in_out, Operand(at));
5620
5621 // Use the transitioned cached map.
5622 lw(map_in_out,
5623 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
5624 }
5625
5626
5627 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5628 lw(dst, NativeContextMemOperand());
5629 lw(dst, ContextMemOperand(dst, index));
5630 }
5631
5632
5633 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5634 Register map,
5635 Register scratch) {
5636 // Load the initial map. The global functions all have initial maps.
5637 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5638 if (emit_debug_code()) {
5639 Label ok, fail;
5640 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
5641 Branch(&ok);
5642 bind(&fail);
5643 Abort(kGlobalFunctionsMustHaveInitialMap);
5644 bind(&ok);
5645 }
5646 }
5647
5648 void MacroAssembler::StubPrologue(StackFrame::Type type) {
5649 li(at, Operand(Smi::FromInt(type)));
5650 PushCommonFrame(at);
5651 }
5652
5653
5654 void MacroAssembler::Prologue(bool code_pre_aging) {
5655 PredictableCodeSizeScope predictible_code_size_scope(
5656 this, kNoCodeAgeSequenceLength);
5657 // The following three instructions must remain together and unmodified
5658 // for code aging to work properly.
5659 if (code_pre_aging) {
5660 // Pre-age the code.
5661 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
5662 nop(Assembler::CODE_AGE_MARKER_NOP);
5663 // Load the stub address to t9 and call it,
5664 // GetCodeAgeAndParity() extracts the stub address from this instruction.
5665 li(t9,
5666 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
5667 CONSTANT_SIZE);
5668 nop(); // Prevent jalr to jal optimization.
5669 jalr(t9, a0);
5670 nop(); // Branch delay slot nop.
5671 nop(); // Pad the empty space.
5672 } else {
5673 PushStandardFrame(a1);
5674 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5675 }
5676 }
5677
5678
5679 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
5680 lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5681 lw(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
5682 lw(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
5683 }
5684
5685
5686 void MacroAssembler::EnterFrame(StackFrame::Type type,
5687 bool load_constant_pool_pointer_reg) {
5688 // Out-of-line constant pool not implemented on mips.
5689 UNREACHABLE();
5690 }
5691
5692
5693 void MacroAssembler::EnterFrame(StackFrame::Type type) {
5694 int stack_offset, fp_offset;
5695 if (type == StackFrame::INTERNAL) {
5696 stack_offset = -4 * kPointerSize;
5697 fp_offset = 2 * kPointerSize;
5698 } else {
5699 stack_offset = -3 * kPointerSize;
5700 fp_offset = 1 * kPointerSize;
5701 }
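// The frame stores ra, fp and the frame-type Smi; INTERNAL frames also store
// the code object in a fourth slot, which is why an extra word was reserved
// above. fp is then adjusted to point at the saved fp slot.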
5702 addiu(sp, sp, stack_offset);
5703 stack_offset = -stack_offset - kPointerSize;
5704 sw(ra, MemOperand(sp, stack_offset));
5705 stack_offset -= kPointerSize;
5706 sw(fp, MemOperand(sp, stack_offset));
5707 stack_offset -= kPointerSize;
5708 li(t9, Operand(Smi::FromInt(type)));
5709 sw(t9, MemOperand(sp, stack_offset));
5710 if (type == StackFrame::INTERNAL) {
5711 DCHECK_EQ(stack_offset, kPointerSize);
5712 li(t9, Operand(CodeObject()));
5713 sw(t9, MemOperand(sp, 0));
5714 } else {
5715 DCHECK_EQ(stack_offset, 0);
5716 }
5717 // Adjust FP to point to saved FP.
5718 Addu(fp, sp, Operand(fp_offset));
5719 }
5720
5721
5722 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
5723 addiu(sp, fp, 2 * kPointerSize);
5724 lw(ra, MemOperand(fp, 1 * kPointerSize));
5725 lw(fp, MemOperand(fp, 0 * kPointerSize));
5726 }
5727
5728 void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
5729 Register argc) {
5730 Push(ra, fp);
5731 Move(fp, sp);
5732 Push(context, target, argc);
5733 }
5734
5735 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
5736 Register argc) {
5737 Pop(context, target, argc);
5738 Pop(ra, fp);
5739 }
5740
5741 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
5742 StackFrame::Type frame_type) {
5743 DCHECK(frame_type == StackFrame::EXIT ||
5744 frame_type == StackFrame::BUILTIN_EXIT);
5745
5746 // Set up the frame structure on the stack.
5747 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5748 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5749 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5750
5751 // This is how the stack will look:
5752 // fp + 2 (==kCallerSPDisplacement) - old stack's end
5753 // [fp + 1 (==kCallerPCOffset)] - saved old ra
5754 // [fp + 0 (==kCallerFPOffset)] - saved old fp
5755 // [fp - 1] - StackFrame::EXIT Smi
5756 // [fp - 2 (==kSPOffset)] - sp of the called function
5757 // [fp - 3 (==kCodeOffset)] - CodeObject
5758 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
5759 // new stack (will contain saved ra)
5760
5761 // Save registers and reserve room for saved entry sp and code object.
5762 addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
5763 sw(ra, MemOperand(sp, 4 * kPointerSize));
5764 sw(fp, MemOperand(sp, 3 * kPointerSize));
5765 li(at, Operand(Smi::FromInt(frame_type)));
5766 sw(at, MemOperand(sp, 2 * kPointerSize));
5767 // Set up new frame pointer.
5768 addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
5769
5770 if (emit_debug_code()) {
5771 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5772 }
5773
5774 // Accessed from ExitFrame::code_slot.
5775 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
5776 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5777
5778 // Save the frame pointer and the context in top.
5779 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5780 sw(fp, MemOperand(t8));
5781 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5782 sw(cp, MemOperand(t8));
5783
5784 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5785 if (save_doubles) {
5786 // The stack must be aligned to 0 modulo 8 for stores with sdc1.
5787 DCHECK(kDoubleSize == frame_alignment);
5788 if (frame_alignment > 0) {
5789 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5790 And(sp, sp, Operand(-frame_alignment)); // Align stack.
5791 }
5792 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
5793 Subu(sp, sp, Operand(space));
5794 // Remember: we only need to save every 2nd double FPU value.
5795 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
5796 FPURegister reg = FPURegister::from_code(i);
5797 sdc1(reg, MemOperand(sp, i * kDoubleSize));
5798 }
5799 }
5800
5801 // Reserve place for the return address, stack space and an optional slot
5802 // (used by the DirectCEntryStub to hold the return value if a struct is
5803 // returned) and align the frame preparing for calling the runtime function.
5804 DCHECK(stack_space >= 0);
5805 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
5806 if (frame_alignment > 0) {
5807 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5808 And(sp, sp, Operand(-frame_alignment)); // Align stack.
5809 }
5810
5811 // Set the exit frame sp value to point just before the return address
5812 // location.
5813 addiu(at, sp, kPointerSize);
5814 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
5815 }
5816
5817
5818 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
5819 bool restore_context, bool do_return,
5820 bool argument_count_is_length) {
5821 // Optionally restore all double registers.
5822 if (save_doubles) {
5823 // Remember: we only need to restore every 2nd double FPU value.
5824 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
5825 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
5826 FPURegister reg = FPURegister::from_code(i);
5827 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
5828 }
5829 }
5830
5831 // Clear top frame.
5832 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5833 sw(zero_reg, MemOperand(t8));
5834
5835 // Restore current context from top and clear it in debug mode.
5836 if (restore_context) {
5837 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5838 lw(cp, MemOperand(t8));
5839 }
5840 #ifdef DEBUG
5841 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5842 sw(a3, MemOperand(t8));
5843 #endif
5844
5845 // Pop the arguments, restore registers, and return.
5846 mov(sp, fp); // Respect ABI stack constraint.
5847 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5848 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5849
5850 if (argument_count.is_valid()) {
5851 if (argument_count_is_length) {
5852 addu(sp, sp, argument_count);
5853 } else {
5854 Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
5855 }
5856 }
5857
5858 if (do_return) {
5859 Ret(USE_DELAY_SLOT);
5860 // If returning, the instruction in the delay slot will be the addiu below.
5861 }
5862 addiu(sp, sp, 8);
5863 }
5864
5865
5866 void MacroAssembler::InitializeNewString(Register string,
5867 Register length,
5868 Heap::RootListIndex map_index,
5869 Register scratch1,
5870 Register scratch2) {
5871 sll(scratch1, length, kSmiTagSize);
5872 LoadRoot(scratch2, map_index);
5873 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
5874 li(scratch1, Operand(String::kEmptyHashField));
5875 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
5876 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
5877 }
5878
5879
5880 int MacroAssembler::ActivationFrameAlignment() {
5881 #if V8_HOST_ARCH_MIPS
5882 // Running on the real platform. Use the alignment as mandated by the local
5883 // environment.
5884 // Note: This will break if we ever start generating snapshots on one Mips
5885 // platform for another Mips platform with a different alignment.
5886 return base::OS::ActivationFrameAlignment();
5887 #else // V8_HOST_ARCH_MIPS
5888 // If we are using the simulator then we should always align to the expected
5889 // alignment. As the simulator is used to generate snapshots we do not know
5890 // if the target platform will need alignment, so this is controlled from a
5891 // flag.
5892 return FLAG_sim_stack_alignment;
5893 #endif // V8_HOST_ARCH_MIPS
5894 }
5895
5896
5897 void MacroAssembler::AssertStackIsAligned() {
5898 if (emit_debug_code()) {
5899 const int frame_alignment = ActivationFrameAlignment();
5900 const int frame_alignment_mask = frame_alignment - 1;
5901
5902 if (frame_alignment > kPointerSize) {
5903 Label alignment_as_expected;
5904 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5905 andi(at, sp, frame_alignment_mask);
5906 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5907 // Don't use Check here, as it will call Runtime_Abort re-entering here.
5908 stop("Unexpected stack alignment");
5909 bind(&alignment_as_expected);
5910 }
5911 }
5912 }
5913
5914
5915 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5916 Register reg,
5917 Register scratch,
5918 Label* not_power_of_two_or_zero) {
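// reg - 1 is negative for reg <= 0, so the first branch rejects zero and
// negative values; for the rest, reg & (reg - 1) clears the lowest set bit
// and is therefore zero exactly when reg is a power of two.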
5919 Subu(scratch, reg, Operand(1));
5920 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5921 scratch, Operand(zero_reg));
5922 and_(at, scratch, reg); // In the delay slot.
5923 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
5924 }
5925
5926
5927 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5928 DCHECK(!reg.is(overflow));
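// Example: reg = 0x40000000 does not fit in a Smi. SmiTag doubles it to
// 0x80000000, and 0x40000000 ^ 0x80000000 = 0xC0000000 has the sign bit set,
// so the caller's (value ^ 2 * value) < 0 check fires. An in-range value
// such as 0x20000000 leaves the xor non-negative.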
5929 mov(overflow, reg); // Save original value.
5930 SmiTag(reg);
5931 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
5932 }
5933
5934
5935 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5936 Register src,
5937 Register overflow) {
5938 if (dst.is(src)) {
5939 // Fall back to slower case.
5940 SmiTagCheckOverflow(dst, overflow);
5941 } else {
5942 DCHECK(!dst.is(src));
5943 DCHECK(!dst.is(overflow));
5944 DCHECK(!src.is(overflow));
5945 SmiTag(dst, src);
5946 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5947 }
5948 }
5949
5950
5951 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5952 Register src,
5953 Label* smi_case) {
5954 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5955 SmiUntag(dst, src);
5956 }
5957
5958
5959 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5960 Register src,
5961 Label* non_smi_case) {
5962 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5963 SmiUntag(dst, src);
5964 }
5965
5966 void MacroAssembler::JumpIfSmi(Register value,
5967 Label* smi_label,
5968 Register scratch,
5969 BranchDelaySlot bd) {
5970 DCHECK_EQ(0, kSmiTag);
5971 andi(scratch, value, kSmiTagMask);
5972 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5973 }
5974
5975 void MacroAssembler::JumpIfNotSmi(Register value,
5976 Label* not_smi_label,
5977 Register scratch,
5978 BranchDelaySlot bd) {
5979 DCHECK_EQ(0, kSmiTag);
5980 andi(scratch, value, kSmiTagMask);
5981 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5982 }
5983
5984
5985 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5986 Register reg2,
5987 Label* on_not_both_smi) {
5988 STATIC_ASSERT(kSmiTag == 0);
5989 DCHECK_EQ(1, kSmiTagMask);
5990 or_(at, reg1, reg2);
5991 JumpIfNotSmi(at, on_not_both_smi);
5992 }
5993
5994
5995 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5996 Register reg2,
5997 Label* on_either_smi) {
5998 STATIC_ASSERT(kSmiTag == 0);
5999 DCHECK_EQ(1, kSmiTagMask);
6000 // Both Smi tags must be 1 (not Smi).
6001 and_(at, reg1, reg2);
6002 JumpIfSmi(at, on_either_smi);
6003 }
6004
6005 void MacroAssembler::AssertNotNumber(Register object) {
6006 if (emit_debug_code()) {
6007 STATIC_ASSERT(kSmiTag == 0);
6008 andi(at, object, kSmiTagMask);
6009 Check(ne, kOperandIsANumber, at, Operand(zero_reg));
6010 GetObjectType(object, t8, t8);
6011 Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
6012 }
6013 }
6014
6015 void MacroAssembler::AssertNotSmi(Register object) {
6016 if (emit_debug_code()) {
6017 STATIC_ASSERT(kSmiTag == 0);
6018 andi(at, object, kSmiTagMask);
6019 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
6020 }
6021 }
6022
6023
6024 void MacroAssembler::AssertSmi(Register object) {
6025 if (emit_debug_code()) {
6026 STATIC_ASSERT(kSmiTag == 0);
6027 andi(at, object, kSmiTagMask);
6028 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
6029 }
6030 }
6031
6032
6033 void MacroAssembler::AssertString(Register object) {
6034 if (emit_debug_code()) {
6035 STATIC_ASSERT(kSmiTag == 0);
6036 SmiTst(object, t8);
6037 Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
6038 GetObjectType(object, t8, t8);
6039 Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
6040 }
6041 }
6042
6043
6044 void MacroAssembler::AssertName(Register object) {
6045 if (emit_debug_code()) {
6046 STATIC_ASSERT(kSmiTag == 0);
6047 SmiTst(object, t8);
6048 Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
6049 GetObjectType(object, t8, t8);
6050 Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
6051 }
6052 }
6053
6054
6055 void MacroAssembler::AssertFunction(Register object) {
6056 if (emit_debug_code()) {
6057 STATIC_ASSERT(kSmiTag == 0);
6058 SmiTst(object, t8);
6059 Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
6060 GetObjectType(object, t8, t8);
6061 Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
6062 }
6063 }
6064
6065
6066 void MacroAssembler::AssertBoundFunction(Register object) {
6067 if (emit_debug_code()) {
6068 STATIC_ASSERT(kSmiTag == 0);
6069 SmiTst(object, t8);
6070 Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
6071 GetObjectType(object, t8, t8);
6072 Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
6073 }
6074 }
6075
6076 void MacroAssembler::AssertGeneratorObject(Register object) {
6077 if (emit_debug_code()) {
6078 STATIC_ASSERT(kSmiTag == 0);
6079 SmiTst(object, t8);
6080 Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
6081 GetObjectType(object, t8, t8);
6082 Check(eq, kOperandIsNotAGeneratorObject, t8,
6083 Operand(JS_GENERATOR_OBJECT_TYPE));
6084 }
6085 }
6086
6087 void MacroAssembler::AssertReceiver(Register object) {
6088 if (emit_debug_code()) {
6089 STATIC_ASSERT(kSmiTag == 0);
6090 SmiTst(object, t8);
6091 Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
6092 GetObjectType(object, t8, t8);
6093 Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
6094 }
6095 }
6096
6097
6098 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
6099 Register scratch) {
6100 if (emit_debug_code()) {
6101 Label done_checking;
6102 AssertNotSmi(object);
6103 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
6104 Branch(&done_checking, eq, object, Operand(scratch));
6105 lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
6106 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
6107 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
6108 bind(&done_checking);
6109 }
6110 }
6111
6112
6113 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
6114 if (emit_debug_code()) {
6115 DCHECK(!reg.is(at));
6116 LoadRoot(at, index);
6117 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
6118 }
6119 }
6120
6121
6122 void MacroAssembler::JumpIfNotHeapNumber(Register object,
6123 Register heap_number_map,
6124 Register scratch,
6125 Label* on_not_heap_number) {
6126 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
6127 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
6128 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
6129 }
6130
6131
6132 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
6133 Register first, Register second, Register scratch1, Register scratch2,
6134 Label* failure) {
6135 // Test that both first and second are sequential one-byte strings.
6136 // Assume that they are non-smis.
6137 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
6138 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
6139 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
6140 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
6141
6142 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
6143 scratch2, failure);
6144 }
6145
6146
6147 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
6148 Register second,
6149 Register scratch1,
6150 Register scratch2,
6151 Label* failure) {
6152 // Check that neither is a smi.
6153 STATIC_ASSERT(kSmiTag == 0);
6154 And(scratch1, first, Operand(second));
6155 JumpIfSmi(scratch1, failure);
6156 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
6157 scratch2, failure);
6158 }
6159
6160
6161 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
6162 Register first, Register second, Register scratch1, Register scratch2,
6163 Label* failure) {
6164 const int kFlatOneByteStringMask =
6165 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6166 const int kFlatOneByteStringTag =
6167 kStringTag | kOneByteStringTag | kSeqStringTag;
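// A sequential one-byte string has the "not a string" bit clear, one-byte
// encoding and sequential representation. Masking the instance type with
// kFlatOneByteStringMask isolates exactly those three fields, so a single
// compare against kFlatOneByteStringTag checks all of them at once.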
6168 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
6169 andi(scratch1, first, kFlatOneByteStringMask);
6170 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
6171 andi(scratch2, second, kFlatOneByteStringMask);
6172 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
6173 }
6174
6175
6176 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
6177 Register scratch,
6178 Label* failure) {
6179 const int kFlatOneByteStringMask =
6180 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6181 const int kFlatOneByteStringTag =
6182 kStringTag | kOneByteStringTag | kSeqStringTag;
6183 And(scratch, type, Operand(kFlatOneByteStringMask));
6184 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
6185 }
6186
6187
6188 static const int kRegisterPassedArguments = 4;
6189
6190 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
6191 int num_double_arguments) {
6192 int stack_passed_words = 0;
6193 num_reg_arguments += 2 * num_double_arguments;
6194
6195 // Up to four simple arguments are passed in registers a0..a3.
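// Each double argument occupies a pair of argument registers, and
// kCArgSlotCount words are always reserved for the register arguments.
// Example: 3 integer and 2 double arguments count as 3 + 2 * 2 = 7 register
// arguments, so 7 - 4 = 3 words go on the stack on top of the reserved
// slots.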
6196 if (num_reg_arguments > kRegisterPassedArguments) {
6197 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
6198 }
6199 stack_passed_words += kCArgSlotCount;
6200 return stack_passed_words;
6201 }
6202
6203
6204 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
6205 Register index,
6206 Register value,
6207 Register scratch,
6208 uint32_t encoding_mask) {
6209 Label is_object;
6210 SmiTst(string, at);
6211 Check(ne, kNonObject, at, Operand(zero_reg));
6212
6213 lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
6214 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
6215
6216 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
6217 li(scratch, Operand(encoding_mask));
6218 Check(eq, kUnexpectedStringType, at, Operand(scratch));
6219
6220 // The index is assumed to be untagged coming in; tag it to compare with the
6221 // string length without using a temp register. It is restored at the end of
6222 // this function.
6223 Label index_tag_ok, index_tag_bad;
6224 TrySmiTag(index, scratch, &index_tag_bad);
6225 Branch(&index_tag_ok);
6226 bind(&index_tag_bad);
6227 Abort(kIndexIsTooLarge);
6228 bind(&index_tag_ok);
6229
6230 lw(at, FieldMemOperand(string, String::kLengthOffset));
6231 Check(lt, kIndexIsTooLarge, index, Operand(at));
6232
6233 DCHECK(Smi::kZero == 0);
6234 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
6235
6236 SmiUntag(index, index);
6237 }
6238
6239
6240 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6241 int num_double_arguments,
6242 Register scratch) {
6243 int frame_alignment = ActivationFrameAlignment();
6244
6245 // Up to four simple arguments are passed in registers a0..a3.
6246 // Those four arguments must have reserved argument slots on the stack for
6247 // mips, even though those argument slots are not normally used.
6248 // Remaining arguments are pushed on the stack, above (higher address than)
6249 // the argument slots.
6250 int stack_passed_arguments = CalculateStackPassedWords(
6251 num_reg_arguments, num_double_arguments);
6252 if (frame_alignment > kPointerSize) {
6253 // Make stack end at alignment and make room for num_arguments - 4 words
6254 // and the original value of sp.
6255 mov(scratch, sp);
6256 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
6257 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6258 And(sp, sp, Operand(-frame_alignment));
6259 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
6260 } else {
6261 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6262 }
6263 }
6264
6265
6266 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6267 Register scratch) {
6268 PrepareCallCFunction(num_reg_arguments, 0, scratch);
6269 }
6270
6271
6272 void MacroAssembler::CallCFunction(ExternalReference function,
6273 int num_reg_arguments,
6274 int num_double_arguments) {
6275 li(t8, Operand(function));
6276 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
6277 }
6278
6279
6280 void MacroAssembler::CallCFunction(Register function,
6281 int num_reg_arguments,
6282 int num_double_arguments) {
6283 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
6284 }
6285
6286
6287 void MacroAssembler::CallCFunction(ExternalReference function,
6288 int num_arguments) {
6289 CallCFunction(function, num_arguments, 0);
6290 }
6291
6292
6293 void MacroAssembler::CallCFunction(Register function,
6294 int num_arguments) {
6295 CallCFunction(function, num_arguments, 0);
6296 }
6297
6298
6299 void MacroAssembler::CallCFunctionHelper(Register function,
6300 int num_reg_arguments,
6301 int num_double_arguments) {
6302 DCHECK(has_frame());
6303 // Make sure that the stack is aligned before calling a C function unless
6304 // running in the simulator. The simulator has its own alignment check which
6305 // provides more information.
6306 // The argument slots are presumed to have been set up by
6307 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
6308
6309 #if V8_HOST_ARCH_MIPS
6310 if (emit_debug_code()) {
6311 int frame_alignment = base::OS::ActivationFrameAlignment();
6312 int frame_alignment_mask = frame_alignment - 1;
6313 if (frame_alignment > kPointerSize) {
6314 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6315 Label alignment_as_expected;
6316 And(at, sp, Operand(frame_alignment_mask));
6317 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6318 // Don't use Check here, as it will call Runtime_Abort possibly
6319 // re-entering here.
6320 stop("Unexpected alignment in CallCFunction");
6321 bind(&alignment_as_expected);
6322 }
6323 }
6324 #endif // V8_HOST_ARCH_MIPS
6325
6326 // Just call directly. The function called cannot cause a GC, or
6327 // allow preemption, so the return address in the link register
6328 // stays correct.
6329
6330 if (!function.is(t9)) {
6331 mov(t9, function);
6332 function = t9;
6333 }
6334
6335 Call(function);
6336
6337 int stack_passed_arguments = CalculateStackPassedWords(
6338 num_reg_arguments, num_double_arguments);
6339
6340 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
6341 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
6342 } else {
6343 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6344 }
6345 }
6346
6347
6348 #undef BRANCH_ARGS_CHECK
6349
6350
6351 void MacroAssembler::CheckPageFlag(
6352 Register object,
6353 Register scratch,
6354 int mask,
6355 Condition cc,
6356 Label* condition_met) {
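// Masking the object address with ~kPageAlignmentMask rounds it down to the
// start of its page, where the MemoryChunk header (and its flags word)
// lives; the flags are then tested against the caller-supplied mask.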
6357 And(scratch, object, Operand(~Page::kPageAlignmentMask));
6358 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
6359 And(scratch, scratch, Operand(mask));
6360 Branch(condition_met, cc, scratch, Operand(zero_reg));
6361 }
6362
6363
6364 void MacroAssembler::JumpIfBlack(Register object,
6365 Register scratch0,
6366 Register scratch1,
6367 Label* on_black) {
6368 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
6369 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6370 }
6371
6372
6373 void MacroAssembler::HasColor(Register object,
6374 Register bitmap_scratch,
6375 Register mask_scratch,
6376 Label* has_color,
6377 int first_bit,
6378 int second_bit) {
6379 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
6380 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
6381
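// The two mark bits of an object are adjacent in the bitmap. mask_scratch
// selects the first one; doubling the mask moves to the second, which can
// wrap into bit 0 of the following bitmap word (the word_boundary path).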
6382 GetMarkBits(object, bitmap_scratch, mask_scratch);
6383
6384 Label other_color, word_boundary;
6385 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6386 And(t8, t9, Operand(mask_scratch));
6387 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
6388 // Shift left 1 by adding.
6389 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
6390 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
6391 And(t8, t9, Operand(mask_scratch));
6392 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
6393 jmp(&other_color);
6394
6395 bind(&word_boundary);
6396 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
6397 And(t9, t9, Operand(1));
6398 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
6399 bind(&other_color);
6400 }
6401
6402
6403 void MacroAssembler::GetMarkBits(Register addr_reg,
6404 Register bitmap_reg,
6405 Register mask_reg) {
6406 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
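// Decompose addr_reg into the page start (bitmap_reg), the index of its
// mark-bit cell within the page (t8) and the bit position inside that cell
// (mask_reg). After the Lsa, bitmap_reg holds the page start plus the cell's
// byte offset (callers read the cell at MemoryChunk::kHeaderSize from
// there), and mask_reg becomes the one-hot mask (1 << bit position).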
6407 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
6408 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
6409 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
6410 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
6411 Lsa(bitmap_reg, bitmap_reg, t8, kPointerSizeLog2, t8);
6412 li(t8, Operand(1));
6413 sllv(mask_reg, t8, mask_reg);
6414 }
6415
6416
6417 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
6418 Register mask_scratch, Register load_scratch,
6419 Label* value_is_white) {
6420 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
6421 GetMarkBits(value, bitmap_scratch, mask_scratch);
6422
6423 // If the value is black or grey we don't need to do anything.
6424 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
6425 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6426 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
6427 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
6428
6429 // Since both black and grey have a 1 in the first position and white does
6430 // not have a 1 there we only need to check one bit.
6431 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6432 And(t8, mask_scratch, load_scratch);
6433 Branch(value_is_white, eq, t8, Operand(zero_reg));
6434 }
6435
6436
6437 void MacroAssembler::LoadInstanceDescriptors(Register map,
6438 Register descriptors) {
6439 lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
6440 }
6441
6442
6443 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
6444 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
6445 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
6446 }
6447
6448
6449 void MacroAssembler::EnumLength(Register dst, Register map) {
6450 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
6451 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
6452 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
6453 SmiTag(dst);
6454 }
6455
6456
6457 void MacroAssembler::LoadAccessor(Register dst, Register holder,
6458 int accessor_index,
6459 AccessorComponent accessor) {
6460 lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
6461 LoadInstanceDescriptors(dst, dst);
6462 lw(dst,
6463 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
6464 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
6465 : AccessorPair::kSetterOffset;
6466 lw(dst, FieldMemOperand(dst, offset));
6467 }
6468
6469
6470 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
6471 Register null_value = t1;
6472 Register empty_fixed_array_value = t2;
6473 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
6474 Label next, start;
6475 mov(a2, a0);
6476
6477 // Check if the enum length field is properly initialized, indicating that
6478 // there is an enum cache.
6479 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6480
6481 EnumLength(a3, a1);
6482 Branch(
6483 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
6484
6485 LoadRoot(null_value, Heap::kNullValueRootIndex);
6486 jmp(&start);
6487
6488 bind(&next);
6489 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6490
6491 // For all objects but the receiver, check that the cache is empty.
6492 EnumLength(a3, a1);
6493 Branch(call_runtime, ne, a3, Operand(Smi::kZero));
6494
6495 bind(&start);
6496
6497 // Check that there are no elements. Register a2 contains the current JS
6498 // object we've reached through the prototype chain.
6499 Label no_elements;
6500 lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
6501 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
6502
6503 // Second chance, the object may be using the empty slow element dictionary.
6504 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
6505 Branch(call_runtime, ne, a2, Operand(at));
6506
6507 bind(&no_elements);
6508 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
6509 Branch(&next, ne, a2, Operand(null_value));
6510 }
6511
6512
6513 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
6514 DCHECK(!output_reg.is(input_reg));
6515 Label done;
6516 li(output_reg, Operand(255));
6517 // Normal branch: nop in delay slot.
6518 Branch(&done, gt, input_reg, Operand(output_reg));
6519 // Use delay slot in this branch.
6520 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
6521 mov(output_reg, zero_reg); // In delay slot.
6522 mov(output_reg, input_reg); // Value is in range 0..255.
6523 bind(&done);
6524 }
6525
6526
6527 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
6528 DoubleRegister input_reg,
6529 DoubleRegister temp_double_reg) {
6530 Label above_zero;
6531 Label done;
6532 Label in_bounds;
6533
6534 Move(temp_double_reg, 0.0);
6535 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
6536
6537 // Double value is less than zero, NaN or Inf, return 0.
6538 mov(result_reg, zero_reg);
6539 Branch(&done);
6540
6541 // Double value is >= 255, return 255.
6542 bind(&above_zero);
6543 Move(temp_double_reg, 255.0);
6544 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
6545 li(result_reg, Operand(255));
6546 Branch(&done);
6547
6548 // In 0-255 range, round and truncate.
6549 bind(&in_bounds);
6550 cvt_w_d(temp_double_reg, input_reg);
6551 mfc1(result_reg, temp_double_reg);
6552 bind(&done);
6553 }
6554
6555 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
6556 Register scratch_reg,
6557 Label* no_memento_found) {
6558 Label map_check;
6559 Label top_check;
6560 ExternalReference new_space_allocation_top_adr =
6561 ExternalReference::new_space_allocation_top_address(isolate());
6562 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
6563 const int kMementoLastWordOffset =
6564 kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
6565
6566 // Bail out if the object is not in new space.
6567 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
6568 // If the object is in new space, we need to check whether it is on the same
6569 // page as the current top.
6570 Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
6571 li(at, Operand(new_space_allocation_top_adr));
6572 lw(at, MemOperand(at));
6573 Xor(scratch_reg, scratch_reg, Operand(at));
6574 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
6575 Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
6576 // The object is on a different page than allocation top. Bail out if the
6577 // object sits on the page boundary as no memento can follow and we cannot
6578 // touch the memory following it.
6579 Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
6580 Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
6581 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
6582 Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
6583 // Continue with the actual map check.
6584 jmp(&map_check);
6585 // If top is on the same page as the current object, we need to check whether
6586 // we are below top.
6587 bind(&top_check);
6588 Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
6589 li(at, Operand(new_space_allocation_top_adr));
6590 lw(at, MemOperand(at));
6591 Branch(no_memento_found, ge, scratch_reg, Operand(at));
6592 // Memento map check.
6593 bind(&map_check);
6594 lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
6595 Branch(no_memento_found, ne, scratch_reg,
6596 Operand(isolate()->factory()->allocation_memento_map()));
6597 }
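
// The Xor/And sequences above implement a same-page test: two addresses lie on
// the same page exactly when all bits above the page-offset field agree.
// Equivalent scalar check:
//
//   bool SamePage(uintptr_t a, uintptr_t b) {
//     return ((a ^ b) & ~Page::kPageAlignmentMask) == 0;
//   }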


Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again, end;

  // Start the walk at the prototype of the receiver's map.
  Move(current, object);
  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&end, eq, current, Operand(factory->null_value()));

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));

  bind(&end);
}
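
// Rough pseudocode for the walk above, using illustrative accessor names
// (map(), prototype(), instance_type(), elements_kind()) rather than the
// exact heap object API:
//
//   obj = object->map()->prototype();
//   while (obj != null) {
//     Map* map = obj->map();
//     if (map->instance_type() < JS_OBJECT_TYPE) goto found;  // Special type.
//     if (map->elements_kind() == DICTIONARY_ELEMENTS) goto found;
//     obj = map->prototype();
//   }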


bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
                        reg10.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  if (reg9.is_valid()) regs |= reg9.bit();
  if (reg10.is_valid()) regs |= reg10.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
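
// A set of registers aliases iff the number of valid arguments differs from
// the number of distinct bits in the combined RegList: any duplicate collapses
// two arguments into one bit. For example, AreAliased(a0, a1, a1) counts three
// valid registers but only two set bits, so it returns true.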


CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted by kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    Assembler::FlushICache(masm_.isolate(), address_, size_);
  }

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Instr current_instr,
                                        uint32_t new_opcode) {
  current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
  masm_.emit(current_instr);
}
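
// Typical CodePatcher usage: construct it over existing code, emit exactly the
// number of instructions it was sized for, and let the destructor flush the
// instruction cache. A minimal sketch (address and replacement instruction are
// placeholders, not taken from real generated code):
//
//   CodePatcher patcher(isolate, target_address, 1, CodePatcher::FLUSH);
//   patcher.masm()->nop();  // Overwrite one instruction at target_address.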


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(at));
  DCHECK(!result.is(at));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  li(at, Operand(mag.multiplier));
  Mulh(result, dividend, Operand(at));
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}
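
// Scalar model of the magic-number signed division emitted above, with n the
// 32-bit dividend and M / s the multiplier and shift returned by
// SignedDivisionByConstant:
//
//   int32_t q = static_cast<int32_t>(
//       (static_cast<int64_t>(static_cast<int32_t>(M)) * n) >> 32);  // Mulh.
//   if (divisor > 0 && static_cast<int32_t>(M) < 0) q += n;
//   if (divisor < 0 && static_cast<int32_t>(M) > 0) q -= n;
//   if (s > 0) q >>= s;                   // Arithmetic shift.
//   q += static_cast<uint32_t>(n) >> 31;  // +1 for negative n: round to zero.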


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS