1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6
7 #if V8_TARGET_ARCH_MIPS64
8
9 #include "src/base/division-by-constant.h"
10 #include "src/bootstrapper.h"
11 #include "src/codegen.h"
12 #include "src/debug/debug.h"
13 #include "src/mips64/macro-assembler-mips64.h"
14 #include "src/register-configuration.h"
15 #include "src/runtime/runtime.h"
16
17 namespace v8 {
18 namespace internal {
19
20 // Floating point constants.
21 const uint64_t kDoubleSignMask = Double::kSignMask;
22 const uint32_t kDoubleExponentShift = HeapNumber::kMantissaBits;
23 const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
24 const uint64_t kDoubleNaNMask = Double::kExponentMask | (1L << kDoubleNaNShift);
25
26 const uint32_t kSingleSignMask = kBinary32SignMask;
27 const uint32_t kSingleExponentMask = kBinary32ExponentMask;
28 const uint32_t kSingleExponentShift = kBinary32ExponentShift;
29 const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
30 const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
31
32 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
33 CodeObjectRequired create_code_object)
34 : Assembler(arg_isolate, buffer, size),
35 generating_stub_(false),
36 has_frame_(false),
37 has_double_zero_reg_set_(false) {
38 if (create_code_object == CodeObjectRequired::kYes) {
39 code_object_ =
40 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
41 }
42 }
43
44 void MacroAssembler::Load(Register dst,
45 const MemOperand& src,
46 Representation r) {
47 DCHECK(!r.IsDouble());
48 if (r.IsInteger8()) {
49 lb(dst, src);
50 } else if (r.IsUInteger8()) {
51 lbu(dst, src);
52 } else if (r.IsInteger16()) {
53 lh(dst, src);
54 } else if (r.IsUInteger16()) {
55 lhu(dst, src);
56 } else if (r.IsInteger32()) {
57 lw(dst, src);
58 } else {
59 ld(dst, src);
60 }
61 }
62
63
64 void MacroAssembler::Store(Register src,
65 const MemOperand& dst,
66 Representation r) {
67 DCHECK(!r.IsDouble());
68 if (r.IsInteger8() || r.IsUInteger8()) {
69 sb(src, dst);
70 } else if (r.IsInteger16() || r.IsUInteger16()) {
71 sh(src, dst);
72 } else if (r.IsInteger32()) {
73 sw(src, dst);
74 } else {
75 if (r.IsHeapObject()) {
76 AssertNotSmi(src);
77 } else if (r.IsSmi()) {
78 AssertSmi(src);
79 }
80 sd(src, dst);
81 }
82 }
83
84
85 void MacroAssembler::LoadRoot(Register destination,
86 Heap::RootListIndex index) {
87 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
88 }
89
90
91 void MacroAssembler::LoadRoot(Register destination,
92 Heap::RootListIndex index,
93 Condition cond,
94 Register src1, const Operand& src2) {
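// Branch over the ld below unless |cond| holds for src1 vs. src2 (the
// 2-instruction offset skips the branch delay slot and the load).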
95 Branch(2, NegateCondition(cond), src1, src2);
96 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
97 }
98
99
100 void MacroAssembler::StoreRoot(Register source,
101 Heap::RootListIndex index) {
102 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
103 sd(source, MemOperand(s6, index << kPointerSizeLog2));
104 }
105
106
107 void MacroAssembler::StoreRoot(Register source,
108 Heap::RootListIndex index,
109 Condition cond,
110 Register src1, const Operand& src2) {
111 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
112 Branch(2, NegateCondition(cond), src1, src2);
113 sd(source, MemOperand(s6, index << kPointerSizeLog2));
114 }
115
116 void MacroAssembler::PushCommonFrame(Register marker_reg) {
117 if (marker_reg.is_valid()) {
118 Push(ra, fp, marker_reg);
119 Daddu(fp, sp, Operand(kPointerSize));
120 } else {
121 Push(ra, fp);
122 mov(fp, sp);
123 }
124 }
125
126 void MacroAssembler::PopCommonFrame(Register marker_reg) {
127 if (marker_reg.is_valid()) {
128 Pop(ra, fp, marker_reg);
129 } else {
130 Pop(ra, fp);
131 }
132 }
133
134 void MacroAssembler::PushStandardFrame(Register function_reg) {
135 int offset = -StandardFrameConstants::kContextOffset;
136 if (function_reg.is_valid()) {
137 Push(ra, fp, cp, function_reg);
138 offset += kPointerSize;
139 } else {
140 Push(ra, fp, cp);
141 }
142 Daddu(fp, sp, Operand(offset));
143 }
144
145 // Push and pop all registers that can hold pointers.
146 void MacroAssembler::PushSafepointRegisters() {
147 // Safepoints expect a block of kNumSafepointRegisters values on the
148 // stack, so adjust the stack for unsaved registers.
149 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
150 DCHECK(num_unsaved >= 0);
151 if (num_unsaved > 0) {
152 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
153 }
154 MultiPush(kSafepointSavedRegisters);
155 }
156
157
158 void MacroAssembler::PopSafepointRegisters() {
159 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
160 MultiPop(kSafepointSavedRegisters);
161 if (num_unsaved > 0) {
162 Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
163 }
164 }
165
166
167 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
168 sd(src, SafepointRegisterSlot(dst));
169 }
170
171
172 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
173 ld(dst, SafepointRegisterSlot(src));
174 }
175
176
177 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
178 // The registers are pushed starting with the highest encoding,
179 // which means that lowest encodings are closest to the stack pointer.
180 return kSafepointRegisterStackIndexMap[reg_code];
181 }
182
183
184 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
185 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
186 }
187
188
189 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
190 UNIMPLEMENTED_MIPS();
191 // General purpose registers are pushed last on the stack.
192 int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
193 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
194 return MemOperand(sp, doubles_size + register_offset);
195 }
196
197
198 void MacroAssembler::InNewSpace(Register object,
199 Register scratch,
200 Condition cc,
201 Label* branch) {
202 DCHECK(cc == eq || cc == ne);
203 CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
204 }
205
206
207 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
208 // The register 'object' contains a heap object pointer. The heap object
209 // tag is shifted away.
210 void MacroAssembler::RecordWriteField(
211 Register object,
212 int offset,
213 Register value,
214 Register dst,
215 RAStatus ra_status,
216 SaveFPRegsMode save_fp,
217 RememberedSetAction remembered_set_action,
218 SmiCheck smi_check,
219 PointersToHereCheck pointers_to_here_check_for_value) {
220 DCHECK(!AreAliased(value, dst, t8, object));
221 // First, check if a write barrier is even needed. The tests below
222 // catch stores of Smis.
223 Label done;
224
225 // Skip barrier if writing a smi.
226 if (smi_check == INLINE_SMI_CHECK) {
227 JumpIfSmi(value, &done);
228 }
229
230 // Although the object register is tagged, the offset is relative to the start
231 // of the object, so the offset must be a multiple of kPointerSize.
232 DCHECK(IsAligned(offset, kPointerSize));
233
234 Daddu(dst, object, Operand(offset - kHeapObjectTag));
235 if (emit_debug_code()) {
236 Label ok;
237 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
238 Branch(&ok, eq, t8, Operand(zero_reg));
239 stop("Unaligned cell in write barrier");
240 bind(&ok);
241 }
242
243 RecordWrite(object,
244 dst,
245 value,
246 ra_status,
247 save_fp,
248 remembered_set_action,
249 OMIT_SMI_CHECK,
250 pointers_to_here_check_for_value);
251
252 bind(&done);
253
254 // Clobber clobbered input registers when running with the debug-code flag
255 // turned on to provoke errors.
256 if (emit_debug_code()) {
257 li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
258 li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
259 }
260 }
261
262
263 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
264 void MacroAssembler::RecordWriteForMap(Register object,
265 Register map,
266 Register dst,
267 RAStatus ra_status,
268 SaveFPRegsMode fp_mode) {
269 if (emit_debug_code()) {
270 DCHECK(!dst.is(at));
271 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
272 Check(eq,
273 kWrongAddressOrValuePassedToRecordWrite,
274 dst,
275 Operand(isolate()->factory()->meta_map()));
276 }
277
278 if (!FLAG_incremental_marking) {
279 return;
280 }
281
282 if (emit_debug_code()) {
283 ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
284 Check(eq,
285 kWrongAddressOrValuePassedToRecordWrite,
286 map,
287 Operand(at));
288 }
289
290 Label done;
291
292 // A single check of the map's pages interesting flag suffices, since it is
293 // only set during incremental collection, and then it's also guaranteed that
294 // the from object's page's interesting flag is also set. This optimization
295 // relies on the fact that maps can never be in new space.
296 CheckPageFlag(map,
297 map, // Used as scratch.
298 MemoryChunk::kPointersToHereAreInterestingMask,
299 eq,
300 &done);
301
302 Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
303 if (emit_debug_code()) {
304 Label ok;
305 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
306 Branch(&ok, eq, at, Operand(zero_reg));
307 stop("Unaligned cell in write barrier");
308 bind(&ok);
309 }
310
311 // Record the actual write.
312 if (ra_status == kRAHasNotBeenSaved) {
313 push(ra);
314 }
315 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
316 fp_mode);
317 CallStub(&stub);
318 if (ra_status == kRAHasNotBeenSaved) {
319 pop(ra);
320 }
321
322 bind(&done);
323
324 // Count number of write barriers in generated code.
325 isolate()->counters()->write_barriers_static()->Increment();
326 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
327
328 // Clobber clobbered registers when running with the debug-code flag
329 // turned on to provoke errors.
330 if (emit_debug_code()) {
331 li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
332 li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
333 }
334 }
335
336
337 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
338 // The register 'object' contains a heap object pointer. The heap object
339 // tag is shifted away.
340 void MacroAssembler::RecordWrite(
341 Register object,
342 Register address,
343 Register value,
344 RAStatus ra_status,
345 SaveFPRegsMode fp_mode,
346 RememberedSetAction remembered_set_action,
347 SmiCheck smi_check,
348 PointersToHereCheck pointers_to_here_check_for_value) {
349 DCHECK(!AreAliased(object, address, value, t8));
350 DCHECK(!AreAliased(object, address, value, t9));
351
352 if (emit_debug_code()) {
353 ld(at, MemOperand(address));
354 Assert(
355 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
356 }
357
358 if (remembered_set_action == OMIT_REMEMBERED_SET &&
359 !FLAG_incremental_marking) {
360 return;
361 }
362
363 // First, check if a write barrier is even needed. The tests below
364 // catch stores of smis and stores into the young generation.
365 Label done;
366
367 if (smi_check == INLINE_SMI_CHECK) {
368 DCHECK_EQ(0, kSmiTag);
369 JumpIfSmi(value, &done);
370 }
371
372 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
373 CheckPageFlag(value,
374 value, // Used as scratch.
375 MemoryChunk::kPointersToHereAreInterestingMask,
376 eq,
377 &done);
378 }
379 CheckPageFlag(object,
380 value, // Used as scratch.
381 MemoryChunk::kPointersFromHereAreInterestingMask,
382 eq,
383 &done);
384
385 // Record the actual write.
386 if (ra_status == kRAHasNotBeenSaved) {
387 push(ra);
388 }
389 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
390 fp_mode);
391 CallStub(&stub);
392 if (ra_status == kRAHasNotBeenSaved) {
393 pop(ra);
394 }
395
396 bind(&done);
397
398 // Count number of write barriers in generated code.
399 isolate()->counters()->write_barriers_static()->Increment();
400 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
401 value);
402
403 // Clobber clobbered registers when running with the debug-code flag
404 // turned on to provoke errors.
405 if (emit_debug_code()) {
406 li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
407 li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
408 }
409 }
410
411 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
412 Register code_entry,
413 Register scratch) {
414 const int offset = JSFunction::kCodeEntryOffset;
415
416 // Since a code entry (value) is always in old space, we don't need to update
417 // remembered set. If incremental marking is off, there is nothing for us to
418 // do.
419 if (!FLAG_incremental_marking) return;
420
421 DCHECK(js_function.is(a1));
422 DCHECK(code_entry.is(a4));
423 DCHECK(scratch.is(a5));
424 AssertNotSmi(js_function);
425
426 if (emit_debug_code()) {
427 Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
428 ld(at, MemOperand(scratch));
429 Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
430 Operand(code_entry));
431 }
432
433 // First, check if a write barrier is even needed. The tests below
434 // catch stores of Smis and stores into young gen.
435 Label done;
436
437 CheckPageFlag(code_entry, scratch,
438 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
439 CheckPageFlag(js_function, scratch,
440 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
441
442 const Register dst = scratch;
443 Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
444
445 // Save caller-saved registers. js_function and code_entry are in the
446 // caller-saved register list.
447 DCHECK(kJSCallerSaved & js_function.bit());
448 DCHECK(kJSCallerSaved & code_entry.bit());
449 MultiPush(kJSCallerSaved | ra.bit());
450
451 int argument_count = 3;
452
453 PrepareCallCFunction(argument_count, code_entry);
454
455 Move(a0, js_function);
456 Move(a1, dst);
457 li(a2, Operand(ExternalReference::isolate_address(isolate())));
458
459 {
460 AllowExternalCallThatCantCauseGC scope(this);
461 CallCFunction(
462 ExternalReference::incremental_marking_record_write_code_entry_function(
463 isolate()),
464 argument_count);
465 }
466
467 // Restore caller-saved registers.
468 MultiPop(kJSCallerSaved | ra.bit());
469
470 bind(&done);
471 }
472
473 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
474 Register address,
475 Register scratch,
476 SaveFPRegsMode fp_mode,
477 RememberedSetFinalAction and_then) {
478 Label done;
479 if (emit_debug_code()) {
480 Label ok;
481 JumpIfNotInNewSpace(object, scratch, &ok);
482 stop("Remembered set pointer is in new space");
483 bind(&ok);
484 }
485 // Load store buffer top.
486 ExternalReference store_buffer =
487 ExternalReference::store_buffer_top(isolate());
488 li(t8, Operand(store_buffer));
489 ld(scratch, MemOperand(t8));
490 // Store pointer to buffer and increment buffer top.
491 sd(address, MemOperand(scratch));
492 Daddu(scratch, scratch, kPointerSize);
493 // Write back new top of buffer.
494 sd(scratch, MemOperand(t8));
495 // Call stub on end of buffer.
496 // Check for end of buffer.
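// A masked buffer top of zero means the store buffer is full; in that case
// fall through to the overflow stub below, otherwise branch (or return) past it.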
497 And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
498 DCHECK(!scratch.is(t8));
499 if (and_then == kFallThroughAtEnd) {
500 Branch(&done, ne, t8, Operand(zero_reg));
501 } else {
502 DCHECK(and_then == kReturnAtEnd);
503 Ret(ne, t8, Operand(zero_reg));
504 }
505 push(ra);
506 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
507 CallStub(&store_buffer_overflow);
508 pop(ra);
509 bind(&done);
510 if (and_then == kReturnAtEnd) {
511 Ret();
512 }
513 }
514
515
516 // -----------------------------------------------------------------------------
517 // Allocation support.
518
519
520 // Compute the hash code from the untagged key. This must be kept in sync with
521 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
522 // code-stubs-hydrogen.cc
523 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
524 // First of all we assign the hash seed to scratch.
525 LoadRoot(scratch, Heap::kHashSeedRootIndex);
526 SmiUntag(scratch);
527
528 // Xor original key with a seed.
529 xor_(reg0, reg0, scratch);
530
531 // Compute the hash code from the untagged key. This must be kept in sync
532 // with ComputeIntegerHash in utils.h.
533 //
534 // hash = ~hash + (hash << 15);
535 // The algorithm uses 32-bit integer values.
536 nor(scratch, reg0, zero_reg);
537 Lsa(reg0, scratch, reg0, 15);
538
539 // hash = hash ^ (hash >> 12);
540 srl(at, reg0, 12);
541 xor_(reg0, reg0, at);
542
543 // hash = hash + (hash << 2);
544 Lsa(reg0, reg0, reg0, 2);
545
546 // hash = hash ^ (hash >> 4);
547 srl(at, reg0, 4);
548 xor_(reg0, reg0, at);
549
550 // hash = hash * 2057;
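// 2057 = 1 + 8 + 2048, computed below as (hash + (hash << 3)) + (hash << 11).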
551 sll(scratch, reg0, 11);
552 Lsa(reg0, reg0, reg0, 3);
553 addu(reg0, reg0, scratch);
554
555 // hash = hash ^ (hash >> 16);
556 srl(at, reg0, 16);
557 xor_(reg0, reg0, at);
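// Mask to 30 bits so the final hash always fits in a positive Smi.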
558 And(reg0, reg0, Operand(0x3fffffff));
559 }
560
561 // ---------------------------------------------------------------------------
562 // Instruction macros.
563
564 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
565 if (rt.is_reg()) {
566 addu(rd, rs, rt.rm());
567 } else {
568 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
569 addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
570 } else {
571 // li handles the relocation.
572 DCHECK(!rs.is(at));
573 li(at, rt);
574 addu(rd, rs, at);
575 }
576 }
577 }
578
579
580 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
581 if (rt.is_reg()) {
582 daddu(rd, rs, rt.rm());
583 } else {
584 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
585 daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
586 } else {
587 // li handles the relocation.
588 DCHECK(!rs.is(at));
589 li(at, rt);
590 daddu(rd, rs, at);
591 }
592 }
593 }
594
595
596 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
597 if (rt.is_reg()) {
598 subu(rd, rs, rt.rm());
599 } else {
600 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
601 addiu(rd, rs, static_cast<int32_t>(
602 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
603 } else {
604 // li handles the relocation.
605 DCHECK(!rs.is(at));
606 li(at, rt);
607 subu(rd, rs, at);
608 }
609 }
610 }
611
612
613 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
614 if (rt.is_reg()) {
615 dsubu(rd, rs, rt.rm());
616 } else {
617 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
618 daddiu(rd, rs,
619 static_cast<int32_t>(
620 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
621 } else {
622 // li handles the relocation.
623 DCHECK(!rs.is(at));
624 li(at, rt);
625 dsubu(rd, rs, at);
626 }
627 }
628 }
629
630
631 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
632 if (rt.is_reg()) {
633 mul(rd, rs, rt.rm());
634 } else {
635 // li handles the relocation.
636 DCHECK(!rs.is(at));
637 li(at, rt);
638 mul(rd, rs, at);
639 }
640 }
641
642
643 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
644 if (rt.is_reg()) {
645 if (kArchVariant != kMips64r6) {
646 mult(rs, rt.rm());
647 mfhi(rd);
648 } else {
649 muh(rd, rs, rt.rm());
650 }
651 } else {
652 // li handles the relocation.
653 DCHECK(!rs.is(at));
654 li(at, rt);
655 if (kArchVariant != kMips64r6) {
656 mult(rs, at);
657 mfhi(rd);
658 } else {
659 muh(rd, rs, at);
660 }
661 }
662 }
663
664
665 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
666 if (rt.is_reg()) {
667 if (kArchVariant != kMips64r6) {
668 multu(rs, rt.rm());
669 mfhi(rd);
670 } else {
671 muhu(rd, rs, rt.rm());
672 }
673 } else {
674 // li handles the relocation.
675 DCHECK(!rs.is(at));
676 li(at, rt);
677 if (kArchVariant != kMips64r6) {
678 multu(rs, at);
679 mfhi(rd);
680 } else {
681 muhu(rd, rs, at);
682 }
683 }
684 }
685
686
687 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
688 if (rt.is_reg()) {
689 if (kArchVariant == kMips64r6) {
690 dmul(rd, rs, rt.rm());
691 } else {
692 dmult(rs, rt.rm());
693 mflo(rd);
694 }
695 } else {
696 // li handles the relocation.
697 DCHECK(!rs.is(at));
698 li(at, rt);
699 if (kArchVariant == kMips64r6) {
700 dmul(rd, rs, at);
701 } else {
702 dmult(rs, at);
703 mflo(rd);
704 }
705 }
706 }
707
708
709 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
710 if (rt.is_reg()) {
711 if (kArchVariant == kMips64r6) {
712 dmuh(rd, rs, rt.rm());
713 } else {
714 dmult(rs, rt.rm());
715 mfhi(rd);
716 }
717 } else {
718 // li handles the relocation.
719 DCHECK(!rs.is(at));
720 li(at, rt);
721 if (kArchVariant == kMips64r6) {
722 dmuh(rd, rs, at);
723 } else {
724 dmult(rs, at);
725 mfhi(rd);
726 }
727 }
728 }
729
730
731 void MacroAssembler::Mult(Register rs, const Operand& rt) {
732 if (rt.is_reg()) {
733 mult(rs, rt.rm());
734 } else {
735 // li handles the relocation.
736 DCHECK(!rs.is(at));
737 li(at, rt);
738 mult(rs, at);
739 }
740 }
741
742
743 void MacroAssembler::Dmult(Register rs, const Operand& rt) {
744 if (rt.is_reg()) {
745 dmult(rs, rt.rm());
746 } else {
747 // li handles the relocation.
748 DCHECK(!rs.is(at));
749 li(at, rt);
750 dmult(rs, at);
751 }
752 }
753
754
755 void MacroAssembler::Multu(Register rs, const Operand& rt) {
756 if (rt.is_reg()) {
757 multu(rs, rt.rm());
758 } else {
759 // li handles the relocation.
760 DCHECK(!rs.is(at));
761 li(at, rt);
762 multu(rs, at);
763 }
764 }
765
766
767 void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
768 if (rt.is_reg()) {
769 dmultu(rs, rt.rm());
770 } else {
771 // li handles the relocation.
772 DCHECK(!rs.is(at));
773 li(at, rt);
774 dmultu(rs, at);
775 }
776 }
777
778
779 void MacroAssembler::Div(Register rs, const Operand& rt) {
780 if (rt.is_reg()) {
781 div(rs, rt.rm());
782 } else {
783 // li handles the relocation.
784 DCHECK(!rs.is(at));
785 li(at, rt);
786 div(rs, at);
787 }
788 }
789
790
791 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
792 if (rt.is_reg()) {
793 if (kArchVariant != kMips64r6) {
794 div(rs, rt.rm());
795 mflo(res);
796 } else {
797 div(res, rs, rt.rm());
798 }
799 } else {
800 // li handles the relocation.
801 DCHECK(!rs.is(at));
802 li(at, rt);
803 if (kArchVariant != kMips64r6) {
804 div(rs, at);
805 mflo(res);
806 } else {
807 div(res, rs, at);
808 }
809 }
810 }
811
812
813 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
814 if (rt.is_reg()) {
815 if (kArchVariant != kMips64r6) {
816 div(rs, rt.rm());
817 mfhi(rd);
818 } else {
819 mod(rd, rs, rt.rm());
820 }
821 } else {
822 // li handles the relocation.
823 DCHECK(!rs.is(at));
824 li(at, rt);
825 if (kArchVariant != kMips64r6) {
826 div(rs, at);
827 mfhi(rd);
828 } else {
829 mod(rd, rs, at);
830 }
831 }
832 }
833
834
835 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
836 if (rt.is_reg()) {
837 if (kArchVariant != kMips64r6) {
838 divu(rs, rt.rm());
839 mfhi(rd);
840 } else {
841 modu(rd, rs, rt.rm());
842 }
843 } else {
844 // li handles the relocation.
845 DCHECK(!rs.is(at));
846 li(at, rt);
847 if (kArchVariant != kMips64r6) {
848 divu(rs, at);
849 mfhi(rd);
850 } else {
851 modu(rd, rs, at);
852 }
853 }
854 }
855
856
857 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
858 if (rt.is_reg()) {
859 ddiv(rs, rt.rm());
860 } else {
861 // li handles the relocation.
862 DCHECK(!rs.is(at));
863 li(at, rt);
864 ddiv(rs, at);
865 }
866 }
867
868
869 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
870 if (kArchVariant != kMips64r6) {
871 if (rt.is_reg()) {
872 ddiv(rs, rt.rm());
873 mflo(rd);
874 } else {
875 // li handles the relocation.
876 DCHECK(!rs.is(at));
877 li(at, rt);
878 ddiv(rs, at);
879 mflo(rd);
880 }
881 } else {
882 if (rt.is_reg()) {
883 ddiv(rd, rs, rt.rm());
884 } else {
885 // li handles the relocation.
886 DCHECK(!rs.is(at));
887 li(at, rt);
888 ddiv(rd, rs, at);
889 }
890 }
891 }
892
893
894 void MacroAssembler::Divu(Register rs, const Operand& rt) {
895 if (rt.is_reg()) {
896 divu(rs, rt.rm());
897 } else {
898 // li handles the relocation.
899 DCHECK(!rs.is(at));
900 li(at, rt);
901 divu(rs, at);
902 }
903 }
904
905
906 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
907 if (rt.is_reg()) {
908 if (kArchVariant != kMips64r6) {
909 divu(rs, rt.rm());
910 mflo(res);
911 } else {
912 divu(res, rs, rt.rm());
913 }
914 } else {
915 // li handles the relocation.
916 DCHECK(!rs.is(at));
917 li(at, rt);
918 if (kArchVariant != kMips64r6) {
919 divu(rs, at);
920 mflo(res);
921 } else {
922 divu(res, rs, at);
923 }
924 }
925 }
926
927
928 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
929 if (rt.is_reg()) {
930 ddivu(rs, rt.rm());
931 } else {
932 // li handles the relocation.
933 DCHECK(!rs.is(at));
934 li(at, rt);
935 ddivu(rs, at);
936 }
937 }
938
939
940 void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
941 if (rt.is_reg()) {
942 if (kArchVariant != kMips64r6) {
943 ddivu(rs, rt.rm());
944 mflo(res);
945 } else {
946 ddivu(res, rs, rt.rm());
947 }
948 } else {
949 // li handles the relocation.
950 DCHECK(!rs.is(at));
951 li(at, rt);
952 if (kArchVariant != kMips64r6) {
953 ddivu(rs, at);
954 mflo(res);
955 } else {
956 ddivu(res, rs, at);
957 }
958 }
959 }
960
961
962 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
963 if (kArchVariant != kMips64r6) {
964 if (rt.is_reg()) {
965 ddiv(rs, rt.rm());
966 mfhi(rd);
967 } else {
968 // li handles the relocation.
969 DCHECK(!rs.is(at));
970 li(at, rt);
971 ddiv(rs, at);
972 mfhi(rd);
973 }
974 } else {
975 if (rt.is_reg()) {
976 dmod(rd, rs, rt.rm());
977 } else {
978 // li handles the relocation.
979 DCHECK(!rs.is(at));
980 li(at, rt);
981 dmod(rd, rs, at);
982 }
983 }
984 }
985
986
987 void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
988 if (kArchVariant != kMips64r6) {
989 if (rt.is_reg()) {
990 ddivu(rs, rt.rm());
991 mfhi(rd);
992 } else {
993 // li handles the relocation.
994 DCHECK(!rs.is(at));
995 li(at, rt);
996 ddivu(rs, at);
997 mfhi(rd);
998 }
999 } else {
1000 if (rt.is_reg()) {
1001 dmodu(rd, rs, rt.rm());
1002 } else {
1003 // li handles the relocation.
1004 DCHECK(!rs.is(at));
1005 li(at, rt);
1006 dmodu(rd, rs, at);
1007 }
1008 }
1009 }
1010
1011
1012 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1013 if (rt.is_reg()) {
1014 and_(rd, rs, rt.rm());
1015 } else {
1016 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1017 andi(rd, rs, static_cast<int32_t>(rt.imm64_));
1018 } else {
1019 // li handles the relocation.
1020 DCHECK(!rs.is(at));
1021 li(at, rt);
1022 and_(rd, rs, at);
1023 }
1024 }
1025 }
1026
1027
1028 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1029 if (rt.is_reg()) {
1030 or_(rd, rs, rt.rm());
1031 } else {
1032 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1033 ori(rd, rs, static_cast<int32_t>(rt.imm64_));
1034 } else {
1035 // li handles the relocation.
1036 DCHECK(!rs.is(at));
1037 li(at, rt);
1038 or_(rd, rs, at);
1039 }
1040 }
1041 }
1042
1043
1044 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1045 if (rt.is_reg()) {
1046 xor_(rd, rs, rt.rm());
1047 } else {
1048 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1049 xori(rd, rs, static_cast<int32_t>(rt.imm64_));
1050 } else {
1051 // li handles the relocation.
1052 DCHECK(!rs.is(at));
1053 li(at, rt);
1054 xor_(rd, rs, at);
1055 }
1056 }
1057 }
1058
1059
1060 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1061 if (rt.is_reg()) {
1062 nor(rd, rs, rt.rm());
1063 } else {
1064 // li handles the relocation.
1065 DCHECK(!rs.is(at));
1066 li(at, rt);
1067 nor(rd, rs, at);
1068 }
1069 }
1070
1071
1072 void MacroAssembler::Neg(Register rs, const Operand& rt) {
1073 DCHECK(rt.is_reg());
1074 DCHECK(!at.is(rs));
1075 DCHECK(!at.is(rt.rm()));
1076 li(at, -1);
1077 xor_(rs, rt.rm(), at);
1078 }
1079
1080
1081 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1082 if (rt.is_reg()) {
1083 slt(rd, rs, rt.rm());
1084 } else {
1085 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1086 slti(rd, rs, static_cast<int32_t>(rt.imm64_));
1087 } else {
1088 // li handles the relocation.
1089 DCHECK(!rs.is(at));
1090 li(at, rt);
1091 slt(rd, rs, at);
1092 }
1093 }
1094 }
1095
1096
1097 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1098 if (rt.is_reg()) {
1099 sltu(rd, rs, rt.rm());
1100 } else {
1101 const uint64_t int16_min = std::numeric_limits<int16_t>::min();
1102 if (is_uint15(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1103 // Imm range is: [0, 32767].
1104 sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
1105 } else if (is_uint15(rt.imm64_ - int16_min) && !MustUseReg(rt.rmode_)) {
1106 // Imm range is: [max_unsigned-32767,max_unsigned].
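// (sltiu sign-extends its 16-bit immediate, so negative int16 immediates
// compare as the top of the unsigned range.)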
1107 sltiu(rd, rs, static_cast<uint16_t>(rt.imm64_));
1108 } else {
1109 // li handles the relocation.
1110 DCHECK(!rs.is(at));
1111 li(at, rt);
1112 sltu(rd, rs, at);
1113 }
1114 }
1115 }
1116
1117
1118 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1119 if (rt.is_reg()) {
1120 rotrv(rd, rs, rt.rm());
1121 } else {
1122 int64_t ror_value = rt.imm64_ % 32;
1123 if (ror_value < 0) {
1124 ror_value += 32;
1125 }
1126 rotr(rd, rs, ror_value);
1127 }
1128 }
1129
1130
1131 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1132 if (rt.is_reg()) {
1133 drotrv(rd, rs, rt.rm());
1134 } else {
1135 int64_t dror_value = rt.imm64_ % 64;
1136 if (dror_value < 0) dror_value += 64;
1137 if (dror_value <= 31) {
1138 drotr(rd, rs, dror_value);
1139 } else {
1140 drotr32(rd, rs, dror_value - 32);
1141 }
1142 }
1143 }
1144
1145
1146 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1147 pref(hint, rs);
1148 }
1149
1150
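// Lsa(rd, rt, rs, sa) computes rd = rt + (rs << sa); on r6 with sa <= 4 it is
// a single lsa instruction, otherwise it is emulated with sll + Addu.
// Dlsa below is the 64-bit counterpart.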
1151 void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1152 Register scratch) {
1153 DCHECK(sa >= 1 && sa <= 31);
1154 if (kArchVariant == kMips64r6 && sa <= 4) {
1155 lsa(rd, rt, rs, sa - 1);
1156 } else {
1157 Register tmp = rd.is(rt) ? scratch : rd;
1158 DCHECK(!tmp.is(rt));
1159 sll(tmp, rs, sa);
1160 Addu(rd, rt, tmp);
1161 }
1162 }
1163
1164
1165 void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
1166 Register scratch) {
1167 DCHECK(sa >= 1 && sa <= 31);
1168 if (kArchVariant == kMips64r6 && sa <= 4) {
1169 dlsa(rd, rt, rs, sa - 1);
1170 } else {
1171 Register tmp = rd.is(rt) ? scratch : rd;
1172 DCHECK(!tmp.is(rt));
1173 dsll(tmp, rs, sa);
1174 Daddu(rd, rt, tmp);
1175 }
1176 }
1177
1178 void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
1179 if (is_trampoline_emitted()) {
1180 Label skip;
1181 bnvc(rs, rt, &skip);
1182 BranchLong(L, PROTECT);
1183 bind(&skip);
1184 } else {
1185 bovc(rs, rt, L);
1186 }
1187 }
1188
1189 void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
1190 if (is_trampoline_emitted()) {
1191 Label skip;
1192 bovc(rs, rt, &skip);
1193 BranchLong(L, PROTECT);
1194 bind(&skip);
1195 } else {
1196 bnvc(rs, rt, L);
1197 }
1198 }
1199
1200 // ------------Pseudo-instructions-------------
1201
1202 // Change endianness
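// dsbh swaps the bytes within each halfword and dshd swaps the halfwords
// within the doubleword, so the pair reverses all eight bytes; in
// ByteSwapSigned, narrower operands are first sign-extended (seb/seh/sll) so
// the swap sees a canonical 64-bit value.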
1203 void MacroAssembler::ByteSwapSigned(Register dest, Register src,
1204 int operand_size) {
1205 DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
1206 operand_size == 8);
1207 DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
1208 if (operand_size == 1) {
1209 seb(src, src);
1210 sll(src, src, 0);
1211 dsbh(dest, src);
1212 dshd(dest, dest);
1213 } else if (operand_size == 2) {
1214 seh(src, src);
1215 sll(src, src, 0);
1216 dsbh(dest, src);
1217 dshd(dest, dest);
1218 } else if (operand_size == 4) {
1219 sll(src, src, 0);
1220 dsbh(dest, src);
1221 dshd(dest, dest);
1222 } else {
1223 dsbh(dest, src);
1224 dshd(dest, dest);
1225 }
1226 }
1227
1228 void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
1229 int operand_size) {
1230 DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
1231 if (operand_size == 1) {
1232 andi(src, src, 0xFF);
1233 dsbh(dest, src);
1234 dshd(dest, dest);
1235 } else if (operand_size == 2) {
1236 andi(src, src, 0xFFFF);
1237 dsbh(dest, src);
1238 dshd(dest, dest);
1239 } else {
1240 dsll32(src, src, 0);
1241 dsrl32(src, src, 0);
1242 dsbh(dest, src);
1243 dshd(dest, dest);
1244 }
1245 }
1246
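// Unaligned word load: on r6 a plain lw is used (lwl/lwr were removed and
// ordinary loads are expected to handle misaligned addresses); on r2 the word
// is assembled from the lwl/lwr pair, each of which fills in its part of the
// unaligned word.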
1247 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1248 DCHECK(!rd.is(at));
1249 DCHECK(!rs.rm().is(at));
1250 if (kArchVariant == kMips64r6) {
1251 lw(rd, rs);
1252 } else {
1253 DCHECK(kArchVariant == kMips64r2);
1254 if (is_int16(rs.offset() + kMipsLwrOffset) &&
1255 is_int16(rs.offset() + kMipsLwlOffset)) {
1256 if (!rd.is(rs.rm())) {
1257 lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1258 lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1259 } else {
1260 lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1261 lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1262 mov(rd, at);
1263 }
1264 } else { // Offset > 16 bits, use multiple instructions to load.
1265 LoadRegPlusOffsetToAt(rs);
1266 lwr(rd, MemOperand(at, kMipsLwrOffset));
1267 lwl(rd, MemOperand(at, kMipsLwlOffset));
1268 }
1269 }
1270 }
1271
1272 void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
1273 if (kArchVariant == kMips64r6) {
1274 lwu(rd, rs);
1275 } else {
1276 DCHECK(kArchVariant == kMips64r2);
1277 Ulw(rd, rs);
1278 Dext(rd, rd, 0, 32);
1279 }
1280 }
1281
1282
1283 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1284 DCHECK(!rd.is(at));
1285 DCHECK(!rs.rm().is(at));
1286 if (kArchVariant == kMips64r6) {
1287 sw(rd, rs);
1288 } else {
1289 DCHECK(kArchVariant == kMips64r2);
1290 if (is_int16(rs.offset() + kMipsSwrOffset) &&
1291 is_int16(rs.offset() + kMipsSwlOffset)) {
1292 swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
1293 swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
1294 } else {
1295 LoadRegPlusOffsetToAt(rs);
1296 swr(rd, MemOperand(at, kMipsSwrOffset));
1297 swl(rd, MemOperand(at, kMipsSwlOffset));
1298 }
1299 }
1300 }
1301
1302 void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
1303 DCHECK(!rd.is(at));
1304 DCHECK(!rs.rm().is(at));
1305 if (kArchVariant == kMips64r6) {
1306 lh(rd, rs);
1307 } else {
1308 DCHECK(kArchVariant == kMips64r2);
1309 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1310 #if defined(V8_TARGET_LITTLE_ENDIAN)
1311 lbu(at, rs);
1312 lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
1313 #elif defined(V8_TARGET_BIG_ENDIAN)
1314 lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1315 lb(rd, rs);
1316 #endif
1317 } else { // Offset > 16 bits, use multiple instructions to load.
1318 LoadRegPlusOffsetToAt(rs);
1319 #if defined(V8_TARGET_LITTLE_ENDIAN)
1320 lb(rd, MemOperand(at, 1));
1321 lbu(at, MemOperand(at, 0));
1322 #elif defined(V8_TARGET_BIG_ENDIAN)
1323 lb(rd, MemOperand(at, 0));
1324 lbu(at, MemOperand(at, 1));
1325 #endif
1326 }
1327 dsll(rd, rd, 8);
1328 or_(rd, rd, at);
1329 }
1330 }
1331
1332 void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
1333 DCHECK(!rd.is(at));
1334 DCHECK(!rs.rm().is(at));
1335 if (kArchVariant == kMips64r6) {
1336 lhu(rd, rs);
1337 } else {
1338 DCHECK(kArchVariant == kMips64r2);
1339 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1340 #if defined(V8_TARGET_LITTLE_ENDIAN)
1341 lbu(at, rs);
1342 lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
1343 #elif defined(V8_TARGET_BIG_ENDIAN)
1344 lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1345 lbu(rd, rs);
1346 #endif
1347 } else { // Offset > 16 bits, use multiple instructions to load.
1348 LoadRegPlusOffsetToAt(rs);
1349 #if defined(V8_TARGET_LITTLE_ENDIAN)
1350 lbu(rd, MemOperand(at, 1));
1351 lbu(at, MemOperand(at, 0));
1352 #elif defined(V8_TARGET_BIG_ENDIAN)
1353 lbu(rd, MemOperand(at, 0));
1354 lbu(at, MemOperand(at, 1));
1355 #endif
1356 }
1357 dsll(rd, rd, 8);
1358 or_(rd, rd, at);
1359 }
1360 }
1361
1362 void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
1363 DCHECK(!rd.is(at));
1364 DCHECK(!rs.rm().is(at));
1365 DCHECK(!rs.rm().is(scratch));
1366 DCHECK(!scratch.is(at));
1367 if (kArchVariant == kMips64r6) {
1368 sh(rd, rs);
1369 } else {
1370 DCHECK(kArchVariant == kMips64r2);
1371 MemOperand source = rs;
1372 // If offset > 16 bits, load address to at with offset 0.
1373 if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
1374 LoadRegPlusOffsetToAt(rs);
1375 source = MemOperand(at, 0);
1376 }
1377
1378 if (!scratch.is(rd)) {
1379 mov(scratch, rd);
1380 }
1381
1382 #if defined(V8_TARGET_LITTLE_ENDIAN)
1383 sb(scratch, source);
1384 srl(scratch, scratch, 8);
1385 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1386 #elif defined(V8_TARGET_BIG_ENDIAN)
1387 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1388 srl(scratch, scratch, 8);
1389 sb(scratch, source);
1390 #endif
1391 }
1392 }
1393
1394 void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
1395 DCHECK(!rd.is(at));
1396 DCHECK(!rs.rm().is(at));
1397 if (kArchVariant == kMips64r6) {
1398 ld(rd, rs);
1399 } else {
1400 DCHECK(kArchVariant == kMips64r2);
1401 if (is_int16(rs.offset() + kMipsLdrOffset) &&
1402 is_int16(rs.offset() + kMipsLdlOffset)) {
1403 if (!rd.is(rs.rm())) {
1404 ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
1405 ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
1406 } else {
1407 ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
1408 ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
1409 mov(rd, at);
1410 }
1411 } else { // Offset > 16 bits, use multiple instructions to load.
1412 LoadRegPlusOffsetToAt(rs);
1413 ldr(rd, MemOperand(at, kMipsLdrOffset));
1414 ldl(rd, MemOperand(at, kMipsLdlOffset));
1415 }
1416 }
1417 }
1418
1419
1420 // Load a consecutive 32-bit word pair into a 64-bit register and put the
1421 // first word in the low bits,
1422 // second word in the high bits.
1423 void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
1424 Register scratch) {
1425 lwu(rd, rs);
1426 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1427 dsll32(scratch, scratch, 0);
1428 Daddu(rd, rd, scratch);
1429 }
1430
1431 void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
1432 DCHECK(!rd.is(at));
1433 DCHECK(!rs.rm().is(at));
1434 if (kArchVariant == kMips64r6) {
1435 sd(rd, rs);
1436 } else {
1437 DCHECK(kArchVariant == kMips64r2);
1438 if (is_int16(rs.offset() + kMipsSdrOffset) &&
1439 is_int16(rs.offset() + kMipsSdlOffset)) {
1440 sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
1441 sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
1442 } else {
1443 LoadRegPlusOffsetToAt(rs);
1444 sdr(rd, MemOperand(at, kMipsSdrOffset));
1445 sdl(rd, MemOperand(at, kMipsSdlOffset));
1446 }
1447 }
1448 }
1449
1450
1451 // Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
1452 void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
1453 Register scratch) {
1454 sw(rd, rs);
1455 dsrl32(scratch, rd, 0);
1456 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1457 }
1458
1459 void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
1460 Register scratch) {
1461 if (kArchVariant == kMips64r6) {
1462 lwc1(fd, rs);
1463 } else {
1464 DCHECK(kArchVariant == kMips64r2);
1465 Ulw(scratch, rs);
1466 mtc1(scratch, fd);
1467 }
1468 }
1469
1470 void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
1471 Register scratch) {
1472 if (kArchVariant == kMips64r6) {
1473 swc1(fd, rs);
1474 } else {
1475 DCHECK(kArchVariant == kMips64r2);
1476 mfc1(scratch, fd);
1477 Usw(scratch, rs);
1478 }
1479 }
1480
1481 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
1482 Register scratch) {
1483 DCHECK(!scratch.is(at));
1484 if (kArchVariant == kMips64r6) {
1485 ldc1(fd, rs);
1486 } else {
1487 DCHECK(kArchVariant == kMips64r2);
1488 Uld(scratch, rs);
1489 dmtc1(scratch, fd);
1490 }
1491 }
1492
1493 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
1494 Register scratch) {
1495 DCHECK(!scratch.is(at));
1496 if (kArchVariant == kMips64r6) {
1497 sdc1(fd, rs);
1498 } else {
1499 DCHECK(kArchVariant == kMips64r2);
1500 dmfc1(scratch, fd);
1501 Usd(scratch, rs);
1502 }
1503 }
1504
1505 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1506 li(dst, Operand(value), mode);
1507 }
1508
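// Helper for the r6 path of li(): shifts imm down by bitnum and, if the bit
// just below the cut is set, adds one to compensate for the sign extension of
// the lower part when the value is materialized before dahi/dati.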
1509 static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
1510 if ((imm >> (bitnum - 1)) & 0x1) {
1511 imm = (imm >> bitnum) + 1;
1512 } else {
1513 imm = imm >> bitnum;
1514 }
1515 return imm;
1516 }
1517
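// Loads the low 32 bits of j into rd and returns whether a lui in the chosen
// sequence set bit 31, i.e. whether the upper 32 bits of rd are now all ones
// and may need to be corrected by the caller (dahi/dati on r6).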
1518 bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
1519 bool higher_bits_sign_extended = false;
1520 if (is_int16(j.imm64_)) {
1521 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1522 } else if (!(j.imm64_ & kHiMask)) {
1523 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1524 } else if (!(j.imm64_ & kImm16Mask)) {
1525 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1526 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1527 higher_bits_sign_extended = true;
1528 }
1529 } else {
1530 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1531 ori(rd, rd, (j.imm64_ & kImm16Mask));
1532 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1533 higher_bits_sign_extended = true;
1534 }
1535 }
1536 return higher_bits_sign_extended;
1537 }
1538
1539 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1540 DCHECK(!j.is_reg());
1541 BlockTrampolinePoolScope block_trampoline_pool(this);
1542 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1543 // Normal load of an immediate value which does not need Relocation Info.
1544 if (is_int32(j.imm64_)) {
1545 LiLower32BitHelper(rd, j);
1546 } else {
1547 if (kArchVariant == kMips64r6) {
1548 int64_t imm = j.imm64_;
1549 bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
1550 imm = ShiftAndFixSignExtension(imm, 32);
1551 // If LUI writes 1s to higher bits, we need both DAHI/DATI.
1552 if ((imm & kImm16Mask) ||
1553 (higher_bits_sign_extended && (j.imm64_ > 0))) {
1554 dahi(rd, imm & kImm16Mask);
1555 }
1556 imm = ShiftAndFixSignExtension(imm, 16);
1557 if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
1558 (higher_bits_sign_extended && (j.imm64_ > 0))) {
1559 dati(rd, imm & kImm16Mask);
1560 }
1561 } else {
1562 if (is_int48(j.imm64_)) {
1563 if ((j.imm64_ >> 32) & kImm16Mask) {
1564 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1565 if ((j.imm64_ >> 16) & kImm16Mask) {
1566 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1567 }
1568 } else {
1569 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
1570 }
1571 dsll(rd, rd, 16);
1572 if (j.imm64_ & kImm16Mask) {
1573 ori(rd, rd, j.imm64_ & kImm16Mask);
1574 }
1575 } else {
1576 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1577 if ((j.imm64_ >> 32) & kImm16Mask) {
1578 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1579 }
1580 if ((j.imm64_ >> 16) & kImm16Mask) {
1581 dsll(rd, rd, 16);
1582 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1583 if (j.imm64_ & kImm16Mask) {
1584 dsll(rd, rd, 16);
1585 ori(rd, rd, j.imm64_ & kImm16Mask);
1586 } else {
1587 dsll(rd, rd, 16);
1588 }
1589 } else {
1590 if (j.imm64_ & kImm16Mask) {
1591 dsll32(rd, rd, 0);
1592 ori(rd, rd, j.imm64_ & kImm16Mask);
1593 } else {
1594 dsll32(rd, rd, 0);
1595 }
1596 }
1597 }
1598 }
1599 }
1600 } else if (MustUseReg(j.rmode_)) {
1601 RecordRelocInfo(j.rmode_, j.imm64_);
1602 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1603 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1604 dsll(rd, rd, 16);
1605 ori(rd, rd, j.imm64_ & kImm16Mask);
1606 } else if (mode == ADDRESS_LOAD) {
1607 // We always need the same number of instructions as we may need to patch
1608 // this code to load another value which may need all 4 instructions.
1609 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1610 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1611 dsll(rd, rd, 16);
1612 ori(rd, rd, j.imm64_ & kImm16Mask);
1613 } else {
1614 if (kArchVariant == kMips64r6) {
1615 int64_t imm = j.imm64_;
1616 lui(rd, (imm >> kLuiShift) & kImm16Mask);
1617 if (imm & kImm16Mask) {
1618 ori(rd, rd, (imm & kImm16Mask));
1619 }
1620 if ((imm >> 31) & 0x1) {
1621 imm = (imm >> 32) + 1;
1622 } else {
1623 imm = imm >> 32;
1624 }
1625 dahi(rd, imm & kImm16Mask);
1626 if ((imm >> 15) & 0x1) {
1627 imm = (imm >> 16) + 1;
1628 } else {
1629 imm = imm >> 16;
1630 }
1631 dati(rd, imm & kImm16Mask);
1632 } else {
1633 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1634 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1635 dsll(rd, rd, 16);
1636 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1637 dsll(rd, rd, 16);
1638 ori(rd, rd, j.imm64_ & kImm16Mask);
1639 }
1640 }
1641 }
1642
1643
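// MultiPush stores registers in descending code order, so the lowest-numbered
// register ends up closest to sp (cf. SafepointRegisterStackIndex above);
// MultiPop restores them in the matching order.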
1644 void MacroAssembler::MultiPush(RegList regs) {
1645 int16_t num_to_push = NumberOfBitsSet(regs);
1646 int16_t stack_offset = num_to_push * kPointerSize;
1647
1648 Dsubu(sp, sp, Operand(stack_offset));
1649 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1650 if ((regs & (1 << i)) != 0) {
1651 stack_offset -= kPointerSize;
1652 sd(ToRegister(i), MemOperand(sp, stack_offset));
1653 }
1654 }
1655 }
1656
1657
1658 void MacroAssembler::MultiPushReversed(RegList regs) {
1659 int16_t num_to_push = NumberOfBitsSet(regs);
1660 int16_t stack_offset = num_to_push * kPointerSize;
1661
1662 Dsubu(sp, sp, Operand(stack_offset));
1663 for (int16_t i = 0; i < kNumRegisters; i++) {
1664 if ((regs & (1 << i)) != 0) {
1665 stack_offset -= kPointerSize;
1666 sd(ToRegister(i), MemOperand(sp, stack_offset));
1667 }
1668 }
1669 }
1670
1671
1672 void MacroAssembler::MultiPop(RegList regs) {
1673 int16_t stack_offset = 0;
1674
1675 for (int16_t i = 0; i < kNumRegisters; i++) {
1676 if ((regs & (1 << i)) != 0) {
1677 ld(ToRegister(i), MemOperand(sp, stack_offset));
1678 stack_offset += kPointerSize;
1679 }
1680 }
1681 daddiu(sp, sp, stack_offset);
1682 }
1683
1684
1685 void MacroAssembler::MultiPopReversed(RegList regs) {
1686 int16_t stack_offset = 0;
1687
1688 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1689 if ((regs & (1 << i)) != 0) {
1690 ld(ToRegister(i), MemOperand(sp, stack_offset));
1691 stack_offset += kPointerSize;
1692 }
1693 }
1694 daddiu(sp, sp, stack_offset);
1695 }
1696
1697
1698 void MacroAssembler::MultiPushFPU(RegList regs) {
1699 int16_t num_to_push = NumberOfBitsSet(regs);
1700 int16_t stack_offset = num_to_push * kDoubleSize;
1701
1702 Dsubu(sp, sp, Operand(stack_offset));
1703 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1704 if ((regs & (1 << i)) != 0) {
1705 stack_offset -= kDoubleSize;
1706 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1707 }
1708 }
1709 }
1710
1711
1712 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1713 int16_t num_to_push = NumberOfBitsSet(regs);
1714 int16_t stack_offset = num_to_push * kDoubleSize;
1715
1716 Dsubu(sp, sp, Operand(stack_offset));
1717 for (int16_t i = 0; i < kNumRegisters; i++) {
1718 if ((regs & (1 << i)) != 0) {
1719 stack_offset -= kDoubleSize;
1720 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1721 }
1722 }
1723 }
1724
1725
1726 void MacroAssembler::MultiPopFPU(RegList regs) {
1727 int16_t stack_offset = 0;
1728
1729 for (int16_t i = 0; i < kNumRegisters; i++) {
1730 if ((regs & (1 << i)) != 0) {
1731 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1732 stack_offset += kDoubleSize;
1733 }
1734 }
1735 daddiu(sp, sp, stack_offset);
1736 }
1737
1738
1739 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1740 int16_t stack_offset = 0;
1741
1742 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1743 if ((regs & (1 << i)) != 0) {
1744 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1745 stack_offset += kDoubleSize;
1746 }
1747 }
1748 daddiu(sp, sp, stack_offset);
1749 }
1750
1751
1752 void MacroAssembler::Ext(Register rt,
1753 Register rs,
1754 uint16_t pos,
1755 uint16_t size) {
1756 DCHECK(pos < 32);
1757 DCHECK(pos + size < 33);
1758 ext_(rt, rs, pos, size);
1759 }
1760
1761 void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
1762 uint16_t size) {
1763 DCHECK(pos < 64);
1764 DCHECK(size > 0 && size <= 64);
1765 DCHECK(pos + size <= 64);
1766 if (pos < 32) {
1767 if (size <= 32) {
1768 Dext(rt, rs, pos, size);
1769 } else {
1770 Dextm(rt, rs, pos, size);
1771 }
1772 } else if (pos < 64) {
1773 DCHECK(size <= 32);
1774 Dextu(rt, rs, pos, size);
1775 }
1776 }
1777
1778 void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
1779 uint16_t size) {
1780 DCHECK(pos < 32);
1781 DCHECK(size > 0 && size <= 32);
1782 dext_(rt, rs, pos, size);
1783 }
1784
1785
1786 void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
1787 uint16_t size) {
1788 DCHECK(pos < 32);
1789 DCHECK(size > 32 && size <= 64);
1790 DCHECK((pos + size) > 32 && (pos + size) <= 64);
1791 dextm(rt, rs, pos, size);
1792 }
1793
1794
1795 void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
1796 uint16_t size) {
1797 DCHECK(pos >= 32 && pos < 64);
1798 DCHECK(size > 0 && size <= 32);
1799 DCHECK((pos + size) > 32 && (pos + size) <= 64);
1800 dextu(rt, rs, pos, size);
1801 }
1802
1803
1804 void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
1805 uint16_t size) {
1806 DCHECK(pos < 32);
1807 DCHECK(pos + size <= 32);
1808 DCHECK(size != 0);
1809 dins_(rt, rs, pos, size);
1810 }
1811
1812
1813 void MacroAssembler::Ins(Register rt,
1814 Register rs,
1815 uint16_t pos,
1816 uint16_t size) {
1817 DCHECK(pos < 32);
1818 DCHECK(pos + size <= 32);
1819 DCHECK(size != 0);
1820 ins_(rt, rs, pos, size);
1821 }
1822
1823 void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
1824 if (kArchVariant == kMips64r6) {
1825 // r6 neg_s changes the sign for NaN-like operands as well.
1826 neg_s(fd, fs);
1827 } else {
1828 DCHECK(kArchVariant == kMips64r2);
1829 Label is_nan, done;
1830 Register scratch1 = t8;
1831 Register scratch2 = t9;
1832 BranchF32(nullptr, &is_nan, eq, fs, fs);
1833 Branch(USE_DELAY_SLOT, &done);
1834 // For NaN input, neg_s will return the same NaN value,
1835 // while the sign has to be changed separately.
1836 neg_s(fd, fs); // In delay slot.
1837 bind(&is_nan);
1838 mfc1(scratch1, fs);
1839 And(scratch2, scratch1, Operand(~kBinary32SignMask));
1840 And(scratch1, scratch1, Operand(kBinary32SignMask));
1841 Xor(scratch1, scratch1, Operand(kBinary32SignMask));
1842 Or(scratch2, scratch2, scratch1);
1843 mtc1(scratch2, fd);
1844 bind(&done);
1845 }
1846 }
1847
1848 void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
1849 if (kArchVariant == kMips64r6) {
1850 // r6 neg_d changes the sign for NaN-like operands as well.
1851 neg_d(fd, fs);
1852 } else {
1853 DCHECK(kArchVariant == kMips64r2);
1854 Label is_nan, done;
1855 Register scratch1 = t8;
1856 Register scratch2 = t9;
1857 BranchF64(nullptr, &is_nan, eq, fs, fs);
1858 Branch(USE_DELAY_SLOT, &done);
1859 // For NaN input, neg_d will return the same NaN value,
1860 // while the sign has to be changed separately.
1861 neg_d(fd, fs); // In delay slot.
1862 bind(&is_nan);
1863 dmfc1(scratch1, fs);
1864 And(scratch2, scratch1, Operand(~Double::kSignMask));
1865 And(scratch1, scratch1, Operand(Double::kSignMask));
1866 Xor(scratch1, scratch1, Operand(Double::kSignMask));
1867 Or(scratch2, scratch2, scratch1);
1868 dmtc1(scratch2, fd);
1869 bind(&done);
1870 }
1871 }
1872
1873 void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
1874 // Move the data from fs to t8.
1875 mfc1(t8, fs);
1876 Cvt_d_uw(fd, t8);
1877 }
1878
1879
Cvt_d_uw(FPURegister fd,Register rs)1880 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
1881 // Convert rs to a FP value in fd.
1882 DCHECK(!rs.is(t9));
1883 DCHECK(!rs.is(at));
1884
1885 // Zero extend int32 in rs.
1886 Dext(t9, rs, 0, 32);
1887 dmtc1(t9, fd);
1888 cvt_d_l(fd, fd);
1889 }
1890
1891
Cvt_d_ul(FPURegister fd,FPURegister fs)1892 void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
1893 // Move the data from fs to t8.
1894 dmfc1(t8, fs);
1895 Cvt_d_ul(fd, t8);
1896 }
1897
1898
Cvt_d_ul(FPURegister fd,Register rs)1899 void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
1900 // Convert rs to a FP value in fd.
1901
1902 DCHECK(!rs.is(t9));
1903 DCHECK(!rs.is(at));
1904
1905 Label msb_clear, conversion_done;
1906
1907 Branch(&msb_clear, ge, rs, Operand(zero_reg));
1908
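  // Values with the MSB set do not fit in a signed 64-bit integer, which is
  // all cvt_d_l can consume, so the value is halved first and the dropped low
  // bit is ORed back in as a sticky bit (round-to-odd) so that the final
  // doubling does not lose rounding information. E.g. for rs = 2^63 + 3 the
  // code converts (rs >> 1) | (rs & 1) = 2^62 + 1 and then doubles the result.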
1909 // Rs >= 2^63
1910 andi(t9, rs, 1);
1911 dsrl(rs, rs, 1);
1912 or_(t9, t9, rs);
1913 dmtc1(t9, fd);
1914 cvt_d_l(fd, fd);
1915 Branch(USE_DELAY_SLOT, &conversion_done);
1916 add_d(fd, fd, fd); // In delay slot.
1917
1918 bind(&msb_clear);
1919 // Rs < 2^63, we can do simple conversion.
1920 dmtc1(rs, fd);
1921 cvt_d_l(fd, fd);
1922
1923 bind(&conversion_done);
1924 }
1925
Cvt_s_uw(FPURegister fd,FPURegister fs)1926 void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
1927 // Move the data from fs to t8.
1928 mfc1(t8, fs);
1929 Cvt_s_uw(fd, t8);
1930 }
1931
Cvt_s_uw(FPURegister fd,Register rs)1932 void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
1933 // Convert rs to a FP value in fd.
1934 DCHECK(!rs.is(t9));
1935 DCHECK(!rs.is(at));
1936
1937 // Zero extend int32 in rs.
1938 Dext(t9, rs, 0, 32);
1939 dmtc1(t9, fd);
1940 cvt_s_l(fd, fd);
1941 }
1942
Cvt_s_ul(FPURegister fd,FPURegister fs)1943 void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
1944 // Move the data from fs to t8.
1945 dmfc1(t8, fs);
1946 Cvt_s_ul(fd, t8);
1947 }
1948
1949
Cvt_s_ul(FPURegister fd,Register rs)1950 void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
1951 // Convert rs to a FP value in fd.
1952
1953 DCHECK(!rs.is(t9));
1954 DCHECK(!rs.is(at));
1955
1956 Label positive, conversion_done;
1957
1958 Branch(&positive, ge, rs, Operand(zero_reg));
1959
1960   // Rs >= 2^63.
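  // Same halving / sticky-bit trick as in Cvt_d_ul above: convert half the
  // value (with the dropped bit ORed back in) and double the result.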
1961 andi(t9, rs, 1);
1962 dsrl(rs, rs, 1);
1963 or_(t9, t9, rs);
1964 dmtc1(t9, fd);
1965 cvt_s_l(fd, fd);
1966 Branch(USE_DELAY_SLOT, &conversion_done);
1967 add_s(fd, fd, fd); // In delay slot.
1968
1969 bind(&positive);
1970   // Rs < 2^63, we can do a simple conversion.
1971 dmtc1(rs, fd);
1972 cvt_s_l(fd, fd);
1973
1974 bind(&conversion_done);
1975 }
1976
1977
Round_l_d(FPURegister fd,FPURegister fs)1978 void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
1979 round_l_d(fd, fs);
1980 }
1981
1982
Floor_l_d(FPURegister fd,FPURegister fs)1983 void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
1984 floor_l_d(fd, fs);
1985 }
1986
1987
Ceil_l_d(FPURegister fd,FPURegister fs)1988 void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
1989 ceil_l_d(fd, fs);
1990 }
1991
1992
Trunc_l_d(FPURegister fd,FPURegister fs)1993 void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
1994 trunc_l_d(fd, fs);
1995 }
1996
1997
Trunc_l_ud(FPURegister fd,FPURegister fs,FPURegister scratch)1998 void MacroAssembler::Trunc_l_ud(FPURegister fd,
1999 FPURegister fs,
2000 FPURegister scratch) {
2001 // Load to GPR.
2002 dmfc1(t8, fs);
2003 // Reset sign bit.
2004 li(at, 0x7fffffffffffffff);
2005 and_(t8, t8, at);
2006 dmtc1(t8, fs);
2007 trunc_l_d(fd, fs);
2008 }
2009
2010
Trunc_uw_d(FPURegister fd,FPURegister fs,FPURegister scratch)2011 void MacroAssembler::Trunc_uw_d(FPURegister fd,
2012 FPURegister fs,
2013 FPURegister scratch) {
2014 Trunc_uw_d(fs, t8, scratch);
2015 mtc1(t8, fd);
2016 }
2017
Trunc_uw_s(FPURegister fd,FPURegister fs,FPURegister scratch)2018 void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
2019 FPURegister scratch) {
2020 Trunc_uw_s(fs, t8, scratch);
2021 mtc1(t8, fd);
2022 }
2023
Trunc_ul_d(FPURegister fd,FPURegister fs,FPURegister scratch,Register result)2024 void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
2025 FPURegister scratch, Register result) {
2026 Trunc_ul_d(fs, t8, scratch, result);
2027 dmtc1(t8, fd);
2028 }
2029
2030
Trunc_ul_s(FPURegister fd,FPURegister fs,FPURegister scratch,Register result)2031 void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
2032 FPURegister scratch, Register result) {
2033 Trunc_ul_s(fs, t8, scratch, result);
2034 dmtc1(t8, fd);
2035 }
2036
2037
Trunc_w_d(FPURegister fd,FPURegister fs)2038 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
2039 trunc_w_d(fd, fs);
2040 }
2041
2042
Round_w_d(FPURegister fd,FPURegister fs)2043 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
2044 round_w_d(fd, fs);
2045 }
2046
2047
Floor_w_d(FPURegister fd,FPURegister fs)2048 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
2049 floor_w_d(fd, fs);
2050 }
2051
2052
Ceil_w_d(FPURegister fd,FPURegister fs)2053 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
2054 ceil_w_d(fd, fs);
2055 }
2056
2057
Trunc_uw_d(FPURegister fd,Register rs,FPURegister scratch)2058 void MacroAssembler::Trunc_uw_d(FPURegister fd,
2059 Register rs,
2060 FPURegister scratch) {
2061 DCHECK(!fd.is(scratch));
2062 DCHECK(!rs.is(at));
2063
2064   // Load 2^31 into scratch as its double representation.
2065 li(at, 0x41E00000);
2066 mtc1(zero_reg, scratch);
2067 mthc1(at, scratch);
2068 // Test if scratch > fd.
2069 // If fd < 2^31 we can convert it normally.
2070 Label simple_convert;
2071 BranchF(&simple_convert, NULL, lt, fd, scratch);
2072
2073 // First we subtract 2^31 from fd, then trunc it to rs
2074 // and add 2^31 to rs.
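  // E.g. (a sketch) for an input of 2^31 + 5.0: fd - 2^31 = 5.0, truncation
  // gives 5, and ORing in bit 31 yields 0x80000005, the expected unsigned
  // result.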
2075 sub_d(scratch, fd, scratch);
2076 trunc_w_d(scratch, scratch);
2077 mfc1(rs, scratch);
2078 Or(rs, rs, 1 << 31);
2079
2080 Label done;
2081 Branch(&done);
2082 // Simple conversion.
2083 bind(&simple_convert);
2084 trunc_w_d(scratch, fd);
2085 mfc1(rs, scratch);
2086
2087 bind(&done);
2088 }
2089
Trunc_uw_s(FPURegister fd,Register rs,FPURegister scratch)2090 void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
2091 FPURegister scratch) {
2092 DCHECK(!fd.is(scratch));
2093 DCHECK(!rs.is(at));
2094
2095 // Load 2^31 into scratch as its float representation.
2096 li(at, 0x4F000000);
2097 mtc1(at, scratch);
2098 // Test if scratch > fd.
2099 // If fd < 2^31 we can convert it normally.
2100 Label simple_convert;
2101 BranchF32(&simple_convert, NULL, lt, fd, scratch);
2102
2103 // First we subtract 2^31 from fd, then trunc it to rs
2104 // and add 2^31 to rs.
2105 sub_s(scratch, fd, scratch);
2106 trunc_w_s(scratch, scratch);
2107 mfc1(rs, scratch);
2108 Or(rs, rs, 1 << 31);
2109
2110 Label done;
2111 Branch(&done);
2112 // Simple conversion.
2113 bind(&simple_convert);
2114 trunc_w_s(scratch, fd);
2115 mfc1(rs, scratch);
2116
2117 bind(&done);
2118 }
2119
Trunc_ul_d(FPURegister fd,Register rs,FPURegister scratch,Register result)2120 void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
2121 FPURegister scratch, Register result) {
2122 DCHECK(!fd.is(scratch));
2123 DCHECK(!AreAliased(rs, result, at));
2124
2125 Label simple_convert, done, fail;
2126 if (result.is_valid()) {
2127 mov(result, zero_reg);
2128 Move(scratch, -1.0);
2129     // If fd <= -1 or unordered, then the conversion fails.
2130 BranchF(&fail, &fail, le, fd, scratch);
2131 }
2132
2133 // Load 2^63 into scratch as its double representation.
2134 li(at, 0x43e0000000000000);
2135 dmtc1(at, scratch);
2136
2137 // Test if scratch > fd.
2138 // If fd < 2^63 we can convert it normally.
2139 BranchF(&simple_convert, nullptr, lt, fd, scratch);
2140
2141 // First we subtract 2^63 from fd, then trunc it to rs
2142 // and add 2^63 to rs.
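  // This mirrors Trunc_uw_d above, but for 64-bit results: inputs in
  // [2^63, 2^64) are brought into signed range by subtracting 2^63, truncated
  // with trunc_l_d, and the high bit is then restored with the OR below.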
2143 sub_d(scratch, fd, scratch);
2144 trunc_l_d(scratch, scratch);
2145 dmfc1(rs, scratch);
2146 Or(rs, rs, Operand(1UL << 63));
2147 Branch(&done);
2148
2149 // Simple conversion.
2150 bind(&simple_convert);
2151 trunc_l_d(scratch, fd);
2152 dmfc1(rs, scratch);
2153
2154 bind(&done);
2155 if (result.is_valid()) {
2156     // The conversion fails if the result is negative.
2157     addiu(at, zero_reg, -1);
2158     dsrl(at, at, 1);  // Load 2^63 - 1 into at.
2159 dmfc1(result, scratch);
2160 xor_(result, result, at);
2161 Slt(result, zero_reg, result);
2162 }
2163
2164 bind(&fail);
2165 }
2166
2167
Trunc_ul_s(FPURegister fd,Register rs,FPURegister scratch,Register result)2168 void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
2169 FPURegister scratch, Register result) {
2170 DCHECK(!fd.is(scratch));
2171 DCHECK(!AreAliased(rs, result, at));
2172
2173 Label simple_convert, done, fail;
2174 if (result.is_valid()) {
2175 mov(result, zero_reg);
2176 Move(scratch, -1.0f);
2177     // If fd <= -1 or unordered, then the conversion fails.
2178 BranchF32(&fail, &fail, le, fd, scratch);
2179 }
2180
2181 // Load 2^63 into scratch as its float representation.
2182 li(at, 0x5f000000);
2183 mtc1(at, scratch);
2184
2185 // Test if scratch > fd.
2186 // If fd < 2^63 we can convert it normally.
2187 BranchF32(&simple_convert, nullptr, lt, fd, scratch);
2188
2189 // First we subtract 2^63 from fd, then trunc it to rs
2190 // and add 2^63 to rs.
2191 sub_s(scratch, fd, scratch);
2192 trunc_l_s(scratch, scratch);
2193 dmfc1(rs, scratch);
2194 Or(rs, rs, Operand(1UL << 63));
2195 Branch(&done);
2196
2197 // Simple conversion.
2198 bind(&simple_convert);
2199 trunc_l_s(scratch, fd);
2200 dmfc1(rs, scratch);
2201
2202 bind(&done);
2203 if (result.is_valid()) {
2204     // The conversion fails if the result is negative or unordered.
2205     addiu(at, zero_reg, -1);
2206     dsrl(at, at, 1);  // Load 2^63 - 1 into at.
2207 dmfc1(result, scratch);
2208 xor_(result, result, at);
2209 Slt(result, zero_reg, result);
2210 }
2211
2212 bind(&fail);
2213 }
2214
2215
Madd_d(FPURegister fd,FPURegister fr,FPURegister fs,FPURegister ft,FPURegister scratch)2216 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2217 FPURegister ft, FPURegister scratch) {
2218 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
2219 madd_d(fd, fr, fs, ft);
2220 } else {
2221     // Cannot change the source registers' values.
2222 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2223 mul_d(scratch, fs, ft);
2224 add_d(fd, fr, scratch);
2225 }
2226 }
2227
2228
BranchFCommon(SecondaryField sizeField,Label * target,Label * nan,Condition cond,FPURegister cmp1,FPURegister cmp2,BranchDelaySlot bd)2229 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
2230 Label* nan, Condition cond, FPURegister cmp1,
2231 FPURegister cmp2, BranchDelaySlot bd) {
2232 BlockTrampolinePoolScope block_trampoline_pool(this);
2233 if (cond == al) {
2234 Branch(bd, target);
2235 return;
2236 }
2237
2238 if (kArchVariant == kMips64r6) {
2239 sizeField = sizeField == D ? L : W;
2240 }
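  // The two architecture variants use different comparison mechanisms:
  // pre-r6 code compares into the FCSR condition bit (c + bc1t/bc1f), while
  // r6 code writes the comparison result into an FPU register
  // (cmp + bc1nez/bc1eqz). Both paths handle an out-of-range target by
  // branching over a long branch with the opposite condition.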
2241
2242 DCHECK(nan || target);
2243 // Check for unordered (NaN) cases.
2244 if (nan) {
2245 bool long_branch =
2246 nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
2247 if (kArchVariant != kMips64r6) {
2248 if (long_branch) {
2249 Label skip;
2250 c(UN, sizeField, cmp1, cmp2);
2251 bc1f(&skip);
2252 nop();
2253 BranchLong(nan, bd);
2254 bind(&skip);
2255 } else {
2256 c(UN, sizeField, cmp1, cmp2);
2257 bc1t(nan);
2258 if (bd == PROTECT) {
2259 nop();
2260 }
2261 }
2262 } else {
2263       // Use kDoubleCompareReg for the comparison result.
2264       // It has to be unavailable to the Lithium
2265       // register allocator.
2266 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2267 if (long_branch) {
2268 Label skip;
2269 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2270 bc1eqz(&skip, kDoubleCompareReg);
2271 nop();
2272 BranchLong(nan, bd);
2273 bind(&skip);
2274 } else {
2275 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2276 bc1nez(nan, kDoubleCompareReg);
2277 if (bd == PROTECT) {
2278 nop();
2279 }
2280 }
2281 }
2282 }
2283
2284 if (target) {
2285 bool long_branch =
2286 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2287 if (long_branch) {
2288 Label skip;
2289 Condition neg_cond = NegateFpuCondition(cond);
2290 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
2291 BranchLong(target, bd);
2292 bind(&skip);
2293 } else {
2294 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
2295 }
2296 }
2297 }
2298
2299
BranchShortF(SecondaryField sizeField,Label * target,Condition cc,FPURegister cmp1,FPURegister cmp2,BranchDelaySlot bd)2300 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
2301 Condition cc, FPURegister cmp1,
2302 FPURegister cmp2, BranchDelaySlot bd) {
2303 if (kArchVariant != kMips64r6) {
2304 BlockTrampolinePoolScope block_trampoline_pool(this);
2305 if (target) {
2306       // NaN cases here have either been handled by this function or are
2307       // assumed to have been handled by the caller.
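      // Conditions like gt and ge have no direct pre-r6 predicate, so they
      // are expressed as the inverse of the complementary unordered predicate
      // (e.g. a > b  <=>  !(a <= b || unordered)), which is why some cases
      // below use c(ULE)/c(ULT) followed by bc1f.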
2308 switch (cc) {
2309 case lt:
2310 c(OLT, sizeField, cmp1, cmp2);
2311 bc1t(target);
2312 break;
2313 case ult:
2314 c(ULT, sizeField, cmp1, cmp2);
2315 bc1t(target);
2316 break;
2317 case gt:
2318 c(ULE, sizeField, cmp1, cmp2);
2319 bc1f(target);
2320 break;
2321 case ugt:
2322 c(OLE, sizeField, cmp1, cmp2);
2323 bc1f(target);
2324 break;
2325 case ge:
2326 c(ULT, sizeField, cmp1, cmp2);
2327 bc1f(target);
2328 break;
2329 case uge:
2330 c(OLT, sizeField, cmp1, cmp2);
2331 bc1f(target);
2332 break;
2333 case le:
2334 c(OLE, sizeField, cmp1, cmp2);
2335 bc1t(target);
2336 break;
2337 case ule:
2338 c(ULE, sizeField, cmp1, cmp2);
2339 bc1t(target);
2340 break;
2341 case eq:
2342 c(EQ, sizeField, cmp1, cmp2);
2343 bc1t(target);
2344 break;
2345 case ueq:
2346 c(UEQ, sizeField, cmp1, cmp2);
2347 bc1t(target);
2348 break;
2349 case ne: // Unordered or not equal.
2350 c(EQ, sizeField, cmp1, cmp2);
2351 bc1f(target);
2352 break;
2353 case ogl:
2354 c(UEQ, sizeField, cmp1, cmp2);
2355 bc1f(target);
2356 break;
2357 default:
2358 CHECK(0);
2359 }
2360 }
2361 } else {
2362 BlockTrampolinePoolScope block_trampoline_pool(this);
2363 if (target) {
2364       // NaN cases here have either been handled by this function or are
2365       // assumed to have been handled by the caller.
2366       // Unsigned conditions are treated as their signed counterparts.
2367       // Use kDoubleCompareReg for the comparison result; it is valid in
2368       // fp64 (FR = 1) mode.
2369 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2370 switch (cc) {
2371 case lt:
2372 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2373 bc1nez(target, kDoubleCompareReg);
2374 break;
2375 case ult:
2376 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2377 bc1nez(target, kDoubleCompareReg);
2378 break;
2379 case gt:
2380 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2381 bc1eqz(target, kDoubleCompareReg);
2382 break;
2383 case ugt:
2384 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2385 bc1eqz(target, kDoubleCompareReg);
2386 break;
2387 case ge:
2388 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2389 bc1eqz(target, kDoubleCompareReg);
2390 break;
2391 case uge:
2392 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2393 bc1eqz(target, kDoubleCompareReg);
2394 break;
2395 case le:
2396 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2397 bc1nez(target, kDoubleCompareReg);
2398 break;
2399 case ule:
2400 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2401 bc1nez(target, kDoubleCompareReg);
2402 break;
2403 case eq:
2404 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2405 bc1nez(target, kDoubleCompareReg);
2406 break;
2407 case ueq:
2408 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2409 bc1nez(target, kDoubleCompareReg);
2410 break;
2411 case ne:
2412 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2413 bc1eqz(target, kDoubleCompareReg);
2414 break;
2415 case ogl:
2416 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2417 bc1eqz(target, kDoubleCompareReg);
2418 break;
2419 default:
2420 CHECK(0);
2421 }
2422 }
2423 }
2424
2425 if (bd == PROTECT) {
2426 nop();
2427 }
2428 }
2429
2430
FmoveLow(FPURegister dst,Register src_low)2431 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2432 DCHECK(!src_low.is(at));
2433 mfhc1(at, dst);
2434 mtc1(src_low, dst);
2435 mthc1(at, dst);
2436 }
2437
2438
Move(FPURegister dst,float imm)2439 void MacroAssembler::Move(FPURegister dst, float imm) {
2440 li(at, Operand(bit_cast<int32_t>(imm)));
2441 mtc1(at, dst);
2442 }
2443
2444
Move(FPURegister dst,double imm)2445 void MacroAssembler::Move(FPURegister dst, double imm) {
2446 int64_t imm_bits = bit_cast<int64_t>(imm);
2447 // Handle special values first.
2448 if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
2449 mov_d(dst, kDoubleRegZero);
2450 } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
2451 Neg_d(dst, kDoubleRegZero);
2452 } else {
2453 uint32_t lo, hi;
2454 DoubleAsTwoUInt32(imm, &lo, &hi);
2455 // Move the low part of the double into the lower bits of the corresponding
2456 // FPU register.
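    // Each 32-bit half is materialized with the cheapest lui/ori sequence
    // that covers its non-zero bits; a half that is entirely zero is written
    // straight from zero_reg.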
2457 if (lo != 0) {
2458 if (!(lo & kImm16Mask)) {
2459 lui(at, (lo >> kLuiShift) & kImm16Mask);
2460 mtc1(at, dst);
2461 } else if (!(lo & kHiMask)) {
2462 ori(at, zero_reg, lo & kImm16Mask);
2463 mtc1(at, dst);
2464 } else {
2465 lui(at, (lo >> kLuiShift) & kImm16Mask);
2466 ori(at, at, lo & kImm16Mask);
2467 mtc1(at, dst);
2468 }
2469 } else {
2470 mtc1(zero_reg, dst);
2471 }
2472 // Move the high part of the double into the high bits of the corresponding
2473 // FPU register.
2474 if (hi != 0) {
2475 if (!(hi & kImm16Mask)) {
2476 lui(at, (hi >> kLuiShift) & kImm16Mask);
2477 mthc1(at, dst);
2478 } else if (!(hi & kHiMask)) {
2479 ori(at, zero_reg, hi & kImm16Mask);
2480 mthc1(at, dst);
2481 } else {
2482 lui(at, (hi >> kLuiShift) & kImm16Mask);
2483 ori(at, at, hi & kImm16Mask);
2484 mthc1(at, dst);
2485 }
2486 } else {
2487 mthc1(zero_reg, dst);
2488 }
2489 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
2490 }
2491 }
2492
2493
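// MIPS64r6 removed the movz/movn conditional-move instructions, so on r6 the
// same effect is obtained with a short branch around a plain mov.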
Movz(Register rd,Register rs,Register rt)2494 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
2495 if (kArchVariant == kMips64r6) {
2496 Label done;
2497 Branch(&done, ne, rt, Operand(zero_reg));
2498 mov(rd, rs);
2499 bind(&done);
2500 } else {
2501 movz(rd, rs, rt);
2502 }
2503 }
2504
2505
Movn(Register rd,Register rs,Register rt)2506 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2507 if (kArchVariant == kMips64r6) {
2508 Label done;
2509 Branch(&done, eq, rt, Operand(zero_reg));
2510 mov(rd, rs);
2511 bind(&done);
2512 } else {
2513 movn(rd, rs, rt);
2514 }
2515 }
2516
2517
Movt(Register rd,Register rs,uint16_t cc)2518 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2519 movt(rd, rs, cc);
2520 }
2521
2522
Movf(Register rd,Register rs,uint16_t cc)2523 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2524 movf(rd, rs, cc);
2525 }
2526
2527 #define __ masm->
2528
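// Helpers for the pre-r6 Min/Max sequences below. When the two inputs compare
// equal, the only case that still matters is -0 vs. +0 (min must pick -0, max
// must pick +0), which an ordinary FP compare cannot distinguish, so the raw
// sign bit of src1 is inspected instead. The helpers return true when the
// result has already been fully produced (identical input registers) and
// false when the caller still has to emit code for the not-equal case.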
ZeroHelper_d(MacroAssembler * masm,MaxMinKind kind,FPURegister dst,FPURegister src1,FPURegister src2,Label * equal)2529 static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2530 FPURegister src1, FPURegister src2, Label* equal) {
2531 if (src1.is(src2)) {
2532 __ Move(dst, src1);
2533 return true;
2534 }
2535
2536 Label other, compare_not_equal;
2537 FPURegister left, right;
2538 if (kind == MaxMinKind::kMin) {
2539 left = src1;
2540 right = src2;
2541 } else {
2542 left = src2;
2543 right = src1;
2544 }
2545
2546 __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
2547   // Left- and right-hand sides are equal; check for -0 vs. +0.
2548 __ dmfc1(t8, src1);
2549 __ Branch(&other, eq, t8, Operand(0x8000000000000000));
2550 __ Move_d(dst, right);
2551 __ Branch(equal);
2552 __ bind(&other);
2553 __ Move_d(dst, left);
2554 __ Branch(equal);
2555 __ bind(&compare_not_equal);
2556 return false;
2557 }
2558
ZeroHelper_s(MacroAssembler * masm,MaxMinKind kind,FPURegister dst,FPURegister src1,FPURegister src2,Label * equal)2559 static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2560 FPURegister src1, FPURegister src2, Label* equal) {
2561 if (src1.is(src2)) {
2562 __ Move(dst, src1);
2563 return true;
2564 }
2565
2566 Label other, compare_not_equal;
2567 FPURegister left, right;
2568 if (kind == MaxMinKind::kMin) {
2569 left = src1;
2570 right = src2;
2571 } else {
2572 left = src2;
2573 right = src1;
2574 }
2575
2576 __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
2577   // Left- and right-hand sides are equal; check for -0 vs. +0.
2578 __ FmoveLow(t8, src1);
2579 __ dsll32(t8, t8, 0);
2580 __ Branch(&other, eq, t8, Operand(0x8000000000000000));
2581 __ Move_s(dst, right);
2582 __ Branch(equal);
2583 __ bind(&other);
2584 __ Move_s(dst, left);
2585 __ Branch(equal);
2586 __ bind(&compare_not_equal);
2587 return false;
2588 }
2589
2590 #undef __
2591
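// On r6 the native min_d/max_d (and min_s/max_s) instructions are used
// directly; pre-r6 the minimum/maximum is selected with explicit
// compare-and-branch sequences, with ZeroHelper_* resolving the signed-zero
// case first. When a |nan| label is supplied, it is taken whenever the inputs
// compare unordered, i.e. when either operand is a NaN.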
MinNaNCheck_d(FPURegister dst,FPURegister src1,FPURegister src2,Label * nan)2592 void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
2593 FPURegister src2, Label* nan) {
2594 if (nan) {
2595 BranchF64(nullptr, nan, eq, src1, src2);
2596 }
2597 if (kArchVariant >= kMips64r6) {
2598 min_d(dst, src1, src2);
2599 } else {
2600 Label skip;
2601 if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2602 if (dst.is(src1)) {
2603 BranchF64(&skip, nullptr, le, src1, src2);
2604 Move_d(dst, src2);
2605 } else if (dst.is(src2)) {
2606 BranchF64(&skip, nullptr, ge, src1, src2);
2607 Move_d(dst, src1);
2608 } else {
2609 Label right;
2610 BranchF64(&right, nullptr, gt, src1, src2);
2611 Move_d(dst, src1);
2612 Branch(&skip);
2613 bind(&right);
2614 Move_d(dst, src2);
2615 }
2616 }
2617 bind(&skip);
2618 }
2619 }
2620
MaxNaNCheck_d(FPURegister dst,FPURegister src1,FPURegister src2,Label * nan)2621 void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
2622 FPURegister src2, Label* nan) {
2623 if (nan) {
2624 BranchF64(nullptr, nan, eq, src1, src2);
2625 }
2626 if (kArchVariant >= kMips64r6) {
2627 max_d(dst, src1, src2);
2628 } else {
2629 Label skip;
2630 if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2631 if (dst.is(src1)) {
2632 BranchF64(&skip, nullptr, ge, src1, src2);
2633 Move_d(dst, src2);
2634 } else if (dst.is(src2)) {
2635 BranchF64(&skip, nullptr, le, src1, src2);
2636 Move_d(dst, src1);
2637 } else {
2638 Label right;
2639 BranchF64(&right, nullptr, lt, src1, src2);
2640 Move_d(dst, src1);
2641 Branch(&skip);
2642 bind(&right);
2643 Move_d(dst, src2);
2644 }
2645 }
2646 bind(&skip);
2647 }
2648 }
2649
MinNaNCheck_s(FPURegister dst,FPURegister src1,FPURegister src2,Label * nan)2650 void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
2651 FPURegister src2, Label* nan) {
2652 if (nan) {
2653 BranchF32(nullptr, nan, eq, src1, src2);
2654 }
2655 if (kArchVariant >= kMips64r6) {
2656 min_s(dst, src1, src2);
2657 } else {
2658 Label skip;
2659 if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2660 if (dst.is(src1)) {
2661 BranchF32(&skip, nullptr, le, src1, src2);
2662 Move_s(dst, src2);
2663 } else if (dst.is(src2)) {
2664 BranchF32(&skip, nullptr, ge, src1, src2);
2665 Move_s(dst, src1);
2666 } else {
2667 Label right;
2668 BranchF32(&right, nullptr, gt, src1, src2);
2669 Move_s(dst, src1);
2670 Branch(&skip);
2671 bind(&right);
2672 Move_s(dst, src2);
2673 }
2674 }
2675 bind(&skip);
2676 }
2677 }
2678
MaxNaNCheck_s(FPURegister dst,FPURegister src1,FPURegister src2,Label * nan)2679 void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
2680 FPURegister src2, Label* nan) {
2681 if (nan) {
2682 BranchF32(nullptr, nan, eq, src1, src2);
2683 }
2684 if (kArchVariant >= kMips64r6) {
2685 max_s(dst, src1, src2);
2686 } else {
2687 Label skip;
2688 if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2689 if (dst.is(src1)) {
2690 BranchF32(&skip, nullptr, ge, src1, src2);
2691 Move_s(dst, src2);
2692 } else if (dst.is(src2)) {
2693 BranchF32(&skip, nullptr, le, src1, src2);
2694 Move_s(dst, src1);
2695 } else {
2696 Label right;
2697 BranchF32(&right, nullptr, lt, src1, src2);
2698 Move_s(dst, src1);
2699 Branch(&skip);
2700 bind(&right);
2701 Move_s(dst, src2);
2702 }
2703 }
2704 bind(&skip);
2705 }
2706 }
2707
Clz(Register rd,Register rs)2708 void MacroAssembler::Clz(Register rd, Register rs) {
2709 clz(rd, rs);
2710 }
2711
2712
EmitFPUTruncate(FPURoundingMode rounding_mode,Register result,DoubleRegister double_input,Register scratch,DoubleRegister double_scratch,Register except_flag,CheckForInexactConversion check_inexact)2713 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2714 Register result,
2715 DoubleRegister double_input,
2716 Register scratch,
2717 DoubleRegister double_scratch,
2718 Register except_flag,
2719 CheckForInexactConversion check_inexact) {
2720 DCHECK(!result.is(scratch));
2721 DCHECK(!double_input.is(double_scratch));
2722 DCHECK(!except_flag.is(scratch));
2723
2724 Label done;
2725
2726 // Clear the except flag (0 = no exception)
2727 mov(except_flag, zero_reg);
2728
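  // Fast path: convert to int32 and back; if the round trip reproduces the
  // input exactly, the value was exactly representable and no FCSR inspection
  // is needed. Otherwise the conversion is redone below under the requested
  // rounding mode with the FCSR cleared, and the accumulated FCSR flags
  // (masked by |except_mask|) are reported in |except_flag|.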
2729 // Test for values that can be exactly represented as a signed 32-bit integer.
2730 cvt_w_d(double_scratch, double_input);
2731 mfc1(result, double_scratch);
2732 cvt_d_w(double_scratch, double_scratch);
2733 BranchF(&done, NULL, eq, double_input, double_scratch);
2734
2735 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
2736
2737 if (check_inexact == kDontCheckForInexactConversion) {
2738 // Ignore inexact exceptions.
2739 except_mask &= ~kFCSRInexactFlagMask;
2740 }
2741
2742 // Save FCSR.
2743 cfc1(scratch, FCSR);
2744 // Disable FPU exceptions.
2745 ctc1(zero_reg, FCSR);
2746
2747 // Do operation based on rounding mode.
2748 switch (rounding_mode) {
2749 case kRoundToNearest:
2750 Round_w_d(double_scratch, double_input);
2751 break;
2752 case kRoundToZero:
2753 Trunc_w_d(double_scratch, double_input);
2754 break;
2755 case kRoundToPlusInf:
2756 Ceil_w_d(double_scratch, double_input);
2757 break;
2758 case kRoundToMinusInf:
2759 Floor_w_d(double_scratch, double_input);
2760 break;
2761 } // End of switch-statement.
2762
2763 // Retrieve FCSR.
2764 cfc1(except_flag, FCSR);
2765 // Restore FCSR.
2766 ctc1(scratch, FCSR);
2767 // Move the converted value into the result register.
2768 mfc1(result, double_scratch);
2769
2770 // Check for fpu exceptions.
2771 And(except_flag, except_flag, Operand(except_mask));
2772
2773 bind(&done);
2774 }
2775
2776
TryInlineTruncateDoubleToI(Register result,DoubleRegister double_input,Label * done)2777 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2778 DoubleRegister double_input,
2779 Label* done) {
2780 DoubleRegister single_scratch = kLithiumScratchDouble.low();
2781 Register scratch = at;
2782 Register scratch2 = t9;
2783
2784 // Clear cumulative exception flags and save the FCSR.
2785 cfc1(scratch2, FCSR);
2786 ctc1(zero_reg, FCSR);
2787 // Try a conversion to a signed integer.
2788 trunc_w_d(single_scratch, double_input);
2789 mfc1(result, single_scratch);
2790 // Retrieve and restore the FCSR.
2791 cfc1(scratch, FCSR);
2792 ctc1(scratch2, FCSR);
2793 // Check for overflow and NaNs.
2794 And(scratch,
2795 scratch,
2796 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2797 // If we had no exceptions we are done.
2798 Branch(done, eq, scratch, Operand(zero_reg));
2799 }
2800
2801
TruncateDoubleToI(Register result,DoubleRegister double_input)2802 void MacroAssembler::TruncateDoubleToI(Register result,
2803 DoubleRegister double_input) {
2804 Label done;
2805
2806 TryInlineTruncateDoubleToI(result, double_input, &done);
2807
2808   // If we fell through, the inline version didn't succeed; call the stub instead.
2809 push(ra);
2810 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2811 sdc1(double_input, MemOperand(sp, 0));
2812
2813 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2814 CallStub(&stub);
2815
2816 Daddu(sp, sp, Operand(kDoubleSize));
2817 pop(ra);
2818
2819 bind(&done);
2820 }
2821
2822
TruncateHeapNumberToI(Register result,Register object)2823 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2824 Label done;
2825 DoubleRegister double_scratch = f12;
2826 DCHECK(!result.is(object));
2827
2828 ldc1(double_scratch,
2829 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2830 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2831
2832   // If we fell through, the inline version didn't succeed; call the stub instead.
2833 push(ra);
2834 DoubleToIStub stub(isolate(),
2835 object,
2836 result,
2837 HeapNumber::kValueOffset - kHeapObjectTag,
2838 true,
2839 true);
2840 CallStub(&stub);
2841 pop(ra);
2842
2843 bind(&done);
2844 }
2845
2846
TruncateNumberToI(Register object,Register result,Register heap_number_map,Register scratch,Label * not_number)2847 void MacroAssembler::TruncateNumberToI(Register object,
2848 Register result,
2849 Register heap_number_map,
2850 Register scratch,
2851 Label* not_number) {
2852 Label done;
2853 DCHECK(!result.is(object));
2854
2855 UntagAndJumpIfSmi(result, object, &done);
2856 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2857 TruncateHeapNumberToI(result, object);
2858
2859 bind(&done);
2860 }
2861
2862
GetLeastBitsFromSmi(Register dst,Register src,int num_least_bits)2863 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2864 Register src,
2865 int num_least_bits) {
2866 // Ext(dst, src, kSmiTagSize, num_least_bits);
2867 SmiUntag(dst, src);
2868 And(dst, dst, Operand((1 << num_least_bits) - 1));
2869 }
2870
2871
GetLeastBitsFromInt32(Register dst,Register src,int num_least_bits)2872 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2873 Register src,
2874 int num_least_bits) {
2875 DCHECK(!src.is(dst));
2876 And(dst, src, Operand((1 << num_least_bits) - 1));
2877 }
2878
2879
2880 // Emulated conditional branches do not emit a nop in the branch delay slot.
2881 //
2882 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2883 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
2884 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
2885 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2886
2887
Branch(int32_t offset,BranchDelaySlot bdslot)2888 void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2889 DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
2890 BranchShort(offset, bdslot);
2891 }
2892
2893
Branch(int32_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)2894 void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2895 const Operand& rt, BranchDelaySlot bdslot) {
2896 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2897 DCHECK(is_near);
2898 USE(is_near);
2899 }
2900
2901
Branch(Label * L,BranchDelaySlot bdslot)2902 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2903 if (L->is_bound()) {
2904 if (is_near_branch(L)) {
2905 BranchShort(L, bdslot);
2906 } else {
2907 BranchLong(L, bdslot);
2908 }
2909 } else {
2910 if (is_trampoline_emitted()) {
2911 BranchLong(L, bdslot);
2912 } else {
2913 BranchShort(L, bdslot);
2914 }
2915 }
2916 }
2917
2918
Branch(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)2919 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2920 const Operand& rt,
2921 BranchDelaySlot bdslot) {
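  // If the target cannot be reached with a short branch, the condition is
  // negated and used to skip over an unconditional long branch, preserving
  // the original branch semantics at the cost of an extra instruction.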
2922 if (L->is_bound()) {
2923 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
2924 if (cond != cc_always) {
2925 Label skip;
2926 Condition neg_cond = NegateCondition(cond);
2927 BranchShort(&skip, neg_cond, rs, rt);
2928 BranchLong(L, bdslot);
2929 bind(&skip);
2930 } else {
2931 BranchLong(L, bdslot);
2932 }
2933 }
2934 } else {
2935 if (is_trampoline_emitted()) {
2936 if (cond != cc_always) {
2937 Label skip;
2938 Condition neg_cond = NegateCondition(cond);
2939 BranchShort(&skip, neg_cond, rs, rt);
2940 BranchLong(L, bdslot);
2941 bind(&skip);
2942 } else {
2943 BranchLong(L, bdslot);
2944 }
2945 } else {
2946 BranchShort(L, cond, rs, rt, bdslot);
2947 }
2948 }
2949 }
2950
2951
Branch(Label * L,Condition cond,Register rs,Heap::RootListIndex index,BranchDelaySlot bdslot)2952 void MacroAssembler::Branch(Label* L,
2953 Condition cond,
2954 Register rs,
2955 Heap::RootListIndex index,
2956 BranchDelaySlot bdslot) {
2957 LoadRoot(at, index);
2958 Branch(L, cond, rs, Operand(at), bdslot);
2959 }
2960
2961
BranchShortHelper(int16_t offset,Label * L,BranchDelaySlot bdslot)2962 void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2963 BranchDelaySlot bdslot) {
2964 DCHECK(L == nullptr || offset == 0);
2965 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2966 b(offset);
2967
2968 // Emit a nop in the branch delay slot if required.
2969 if (bdslot == PROTECT)
2970 nop();
2971 }
2972
2973
BranchShortHelperR6(int32_t offset,Label * L)2974 void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2975 DCHECK(L == nullptr || offset == 0);
2976 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2977 bc(offset);
2978 }
2979
2980
BranchShort(int32_t offset,BranchDelaySlot bdslot)2981 void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2982 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2983 DCHECK(is_int26(offset));
2984 BranchShortHelperR6(offset, nullptr);
2985 } else {
2986 DCHECK(is_int16(offset));
2987 BranchShortHelper(offset, nullptr, bdslot);
2988 }
2989 }
2990
2991
BranchShort(Label * L,BranchDelaySlot bdslot)2992 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2993 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2994 BranchShortHelperR6(0, L);
2995 } else {
2996 BranchShortHelper(0, L, bdslot);
2997 }
2998 }
2999
3000
IsZero(const Operand & rt)3001 static inline bool IsZero(const Operand& rt) {
3002 if (rt.is_reg()) {
3003 return rt.rm().is(zero_reg);
3004 } else {
3005 return rt.immediate() == 0;
3006 }
3007 }
3008
3009
GetOffset(int32_t offset,Label * L,OffsetSize bits)3010 int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
3011 if (L) {
3012 offset = branch_offset_helper(L, bits) >> 2;
3013 } else {
3014 DCHECK(is_intn(offset, bits));
3015 }
3016 return offset;
3017 }
3018
3019
GetRtAsRegisterHelper(const Operand & rt,Register scratch)3020 Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
3021 Register scratch) {
3022 Register r2 = no_reg;
3023 if (rt.is_reg()) {
3024 r2 = rt.rm_;
3025 } else {
3026 r2 = scratch;
3027 li(r2, rt);
3028 }
3029
3030 return r2;
3031 }
3032
3033
BranchShortHelperR6(int32_t offset,Label * L,Condition cond,Register rs,const Operand & rt)3034 bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
3035 Condition cond, Register rs,
3036 const Operand& rt) {
3037 DCHECK(L == nullptr || offset == 0);
3038 Register scratch = rs.is(at) ? t8 : at;
3039 OffsetSize bits = OffsetSize::kOffset16;
3040
3041   // Be careful to always use shifted_branch_offset only just before the
3042   // branch instruction, as the location will be remembered for patching
3043   // the target.
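  // The reachable range depends on the instruction chosen: bc takes a 26-bit
  // offset, beqzc/bnezc take 21 bits, and the two-register compact branches
  // take 16 bits. Whenever the bound label is out of range for the required
  // form, this helper returns false so the caller can fall back to a long
  // branch sequence.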
3044 {
3045 BlockTrampolinePoolScope block_trampoline_pool(this);
3046 switch (cond) {
3047 case cc_always:
3048 bits = OffsetSize::kOffset26;
3049 if (!is_near(L, bits)) return false;
3050 offset = GetOffset(offset, L, bits);
3051 bc(offset);
3052 break;
3053 case eq:
3054 if (rs.code() == rt.rm_.reg_code) {
3055           // The pre-R6 beq is used here to make the code patchable. Otherwise bc
3056           // should be used, which has no condition field and so is not patchable.
3057 bits = OffsetSize::kOffset16;
3058 if (!is_near(L, bits)) return false;
3059 scratch = GetRtAsRegisterHelper(rt, scratch);
3060 offset = GetOffset(offset, L, bits);
3061 beq(rs, scratch, offset);
3062 nop();
3063 } else if (IsZero(rt)) {
3064 bits = OffsetSize::kOffset21;
3065 if (!is_near(L, bits)) return false;
3066 offset = GetOffset(offset, L, bits);
3067 beqzc(rs, offset);
3068 } else {
3069 // We don't want any other register but scratch clobbered.
3070 bits = OffsetSize::kOffset16;
3071 if (!is_near(L, bits)) return false;
3072 scratch = GetRtAsRegisterHelper(rt, scratch);
3073 offset = GetOffset(offset, L, bits);
3074 beqc(rs, scratch, offset);
3075 }
3076 break;
3077 case ne:
3078 if (rs.code() == rt.rm_.reg_code) {
3079           // The pre-R6 bne is used here to make the code patchable. Otherwise no
3080           // instruction would need to be generated.
3081 bits = OffsetSize::kOffset16;
3082 if (!is_near(L, bits)) return false;
3083 scratch = GetRtAsRegisterHelper(rt, scratch);
3084 offset = GetOffset(offset, L, bits);
3085 bne(rs, scratch, offset);
3086 nop();
3087 } else if (IsZero(rt)) {
3088 bits = OffsetSize::kOffset21;
3089 if (!is_near(L, bits)) return false;
3090 offset = GetOffset(offset, L, bits);
3091 bnezc(rs, offset);
3092 } else {
3093 // We don't want any other register but scratch clobbered.
3094 bits = OffsetSize::kOffset16;
3095 if (!is_near(L, bits)) return false;
3096 scratch = GetRtAsRegisterHelper(rt, scratch);
3097 offset = GetOffset(offset, L, bits);
3098 bnec(rs, scratch, offset);
3099 }
3100 break;
3101
3102 // Signed comparison.
3103 case greater:
3104 // rs > rt
3105 if (rs.code() == rt.rm_.reg_code) {
3106 break; // No code needs to be emitted.
3107 } else if (rs.is(zero_reg)) {
3108 bits = OffsetSize::kOffset16;
3109 if (!is_near(L, bits)) return false;
3110 scratch = GetRtAsRegisterHelper(rt, scratch);
3111 offset = GetOffset(offset, L, bits);
3112 bltzc(scratch, offset);
3113 } else if (IsZero(rt)) {
3114 bits = OffsetSize::kOffset16;
3115 if (!is_near(L, bits)) return false;
3116 offset = GetOffset(offset, L, bits);
3117 bgtzc(rs, offset);
3118 } else {
3119 bits = OffsetSize::kOffset16;
3120 if (!is_near(L, bits)) return false;
3121 scratch = GetRtAsRegisterHelper(rt, scratch);
3122 DCHECK(!rs.is(scratch));
3123 offset = GetOffset(offset, L, bits);
3124 bltc(scratch, rs, offset);
3125 }
3126 break;
3127 case greater_equal:
3128 // rs >= rt
3129 if (rs.code() == rt.rm_.reg_code) {
3130 bits = OffsetSize::kOffset26;
3131 if (!is_near(L, bits)) return false;
3132 offset = GetOffset(offset, L, bits);
3133 bc(offset);
3134 } else if (rs.is(zero_reg)) {
3135 bits = OffsetSize::kOffset16;
3136 if (!is_near(L, bits)) return false;
3137 scratch = GetRtAsRegisterHelper(rt, scratch);
3138 offset = GetOffset(offset, L, bits);
3139 blezc(scratch, offset);
3140 } else if (IsZero(rt)) {
3141 bits = OffsetSize::kOffset16;
3142 if (!is_near(L, bits)) return false;
3143 offset = GetOffset(offset, L, bits);
3144 bgezc(rs, offset);
3145 } else {
3146 bits = OffsetSize::kOffset16;
3147 if (!is_near(L, bits)) return false;
3148 scratch = GetRtAsRegisterHelper(rt, scratch);
3149 DCHECK(!rs.is(scratch));
3150 offset = GetOffset(offset, L, bits);
3151 bgec(rs, scratch, offset);
3152 }
3153 break;
3154 case less:
3155 // rs < rt
3156 if (rs.code() == rt.rm_.reg_code) {
3157 break; // No code needs to be emitted.
3158 } else if (rs.is(zero_reg)) {
3159 bits = OffsetSize::kOffset16;
3160 if (!is_near(L, bits)) return false;
3161 scratch = GetRtAsRegisterHelper(rt, scratch);
3162 offset = GetOffset(offset, L, bits);
3163 bgtzc(scratch, offset);
3164 } else if (IsZero(rt)) {
3165 bits = OffsetSize::kOffset16;
3166 if (!is_near(L, bits)) return false;
3167 offset = GetOffset(offset, L, bits);
3168 bltzc(rs, offset);
3169 } else {
3170 bits = OffsetSize::kOffset16;
3171 if (!is_near(L, bits)) return false;
3172 scratch = GetRtAsRegisterHelper(rt, scratch);
3173 DCHECK(!rs.is(scratch));
3174 offset = GetOffset(offset, L, bits);
3175 bltc(rs, scratch, offset);
3176 }
3177 break;
3178 case less_equal:
3179 // rs <= rt
3180 if (rs.code() == rt.rm_.reg_code) {
3181 bits = OffsetSize::kOffset26;
3182 if (!is_near(L, bits)) return false;
3183 offset = GetOffset(offset, L, bits);
3184 bc(offset);
3185 } else if (rs.is(zero_reg)) {
3186 bits = OffsetSize::kOffset16;
3187 if (!is_near(L, bits)) return false;
3188 scratch = GetRtAsRegisterHelper(rt, scratch);
3189 offset = GetOffset(offset, L, bits);
3190 bgezc(scratch, offset);
3191 } else if (IsZero(rt)) {
3192 bits = OffsetSize::kOffset16;
3193 if (!is_near(L, bits)) return false;
3194 offset = GetOffset(offset, L, bits);
3195 blezc(rs, offset);
3196 } else {
3197 bits = OffsetSize::kOffset16;
3198 if (!is_near(L, bits)) return false;
3199 scratch = GetRtAsRegisterHelper(rt, scratch);
3200 DCHECK(!rs.is(scratch));
3201 offset = GetOffset(offset, L, bits);
3202 bgec(scratch, rs, offset);
3203 }
3204 break;
3205
3206 // Unsigned comparison.
3207 case Ugreater:
3208 // rs > rt
3209 if (rs.code() == rt.rm_.reg_code) {
3210 break; // No code needs to be emitted.
3211 } else if (rs.is(zero_reg)) {
3212 bits = OffsetSize::kOffset21;
3213 if (!is_near(L, bits)) return false;
3214 scratch = GetRtAsRegisterHelper(rt, scratch);
3215 offset = GetOffset(offset, L, bits);
3216 bnezc(scratch, offset);
3217 } else if (IsZero(rt)) {
3218 bits = OffsetSize::kOffset21;
3219 if (!is_near(L, bits)) return false;
3220 offset = GetOffset(offset, L, bits);
3221 bnezc(rs, offset);
3222 } else {
3223 bits = OffsetSize::kOffset16;
3224 if (!is_near(L, bits)) return false;
3225 scratch = GetRtAsRegisterHelper(rt, scratch);
3226 DCHECK(!rs.is(scratch));
3227 offset = GetOffset(offset, L, bits);
3228 bltuc(scratch, rs, offset);
3229 }
3230 break;
3231 case Ugreater_equal:
3232 // rs >= rt
3233 if (rs.code() == rt.rm_.reg_code) {
3234 bits = OffsetSize::kOffset26;
3235 if (!is_near(L, bits)) return false;
3236 offset = GetOffset(offset, L, bits);
3237 bc(offset);
3238 } else if (rs.is(zero_reg)) {
3239 bits = OffsetSize::kOffset21;
3240 if (!is_near(L, bits)) return false;
3241 scratch = GetRtAsRegisterHelper(rt, scratch);
3242 offset = GetOffset(offset, L, bits);
3243 beqzc(scratch, offset);
3244 } else if (IsZero(rt)) {
3245 bits = OffsetSize::kOffset26;
3246 if (!is_near(L, bits)) return false;
3247 offset = GetOffset(offset, L, bits);
3248 bc(offset);
3249 } else {
3250 bits = OffsetSize::kOffset16;
3251 if (!is_near(L, bits)) return false;
3252 scratch = GetRtAsRegisterHelper(rt, scratch);
3253 DCHECK(!rs.is(scratch));
3254 offset = GetOffset(offset, L, bits);
3255 bgeuc(rs, scratch, offset);
3256 }
3257 break;
3258 case Uless:
3259 // rs < rt
3260 if (rs.code() == rt.rm_.reg_code) {
3261 break; // No code needs to be emitted.
3262 } else if (rs.is(zero_reg)) {
3263 bits = OffsetSize::kOffset21;
3264 if (!is_near(L, bits)) return false;
3265 scratch = GetRtAsRegisterHelper(rt, scratch);
3266 offset = GetOffset(offset, L, bits);
3267 bnezc(scratch, offset);
3268 } else if (IsZero(rt)) {
3269 break; // No code needs to be emitted.
3270 } else {
3271 bits = OffsetSize::kOffset16;
3272 if (!is_near(L, bits)) return false;
3273 scratch = GetRtAsRegisterHelper(rt, scratch);
3274 DCHECK(!rs.is(scratch));
3275 offset = GetOffset(offset, L, bits);
3276 bltuc(rs, scratch, offset);
3277 }
3278 break;
3279 case Uless_equal:
3280 // rs <= rt
3281 if (rs.code() == rt.rm_.reg_code) {
3282 bits = OffsetSize::kOffset26;
3283 if (!is_near(L, bits)) return false;
3284 offset = GetOffset(offset, L, bits);
3285 bc(offset);
3286 } else if (rs.is(zero_reg)) {
3287 bits = OffsetSize::kOffset26;
3288 if (!is_near(L, bits)) return false;
3289 scratch = GetRtAsRegisterHelper(rt, scratch);
3290 offset = GetOffset(offset, L, bits);
3291 bc(offset);
3292 } else if (IsZero(rt)) {
3293 bits = OffsetSize::kOffset21;
3294 if (!is_near(L, bits)) return false;
3295 offset = GetOffset(offset, L, bits);
3296 beqzc(rs, offset);
3297 } else {
3298 bits = OffsetSize::kOffset16;
3299 if (!is_near(L, bits)) return false;
3300 scratch = GetRtAsRegisterHelper(rt, scratch);
3301 DCHECK(!rs.is(scratch));
3302 offset = GetOffset(offset, L, bits);
3303 bgeuc(scratch, rs, offset);
3304 }
3305 break;
3306 default:
3307 UNREACHABLE();
3308 }
3309 }
3310 CheckTrampolinePoolQuick(1);
3311 return true;
3312 }
3313
3314
BranchShortHelper(int16_t offset,Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3315 bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3316 Register rs, const Operand& rt,
3317 BranchDelaySlot bdslot) {
3318 DCHECK(L == nullptr || offset == 0);
3319 if (!is_near(L, OffsetSize::kOffset16)) return false;
3320
3321 Register scratch = at;
3322 int32_t offset32;
3323
3324   // Be careful to always use shifted_branch_offset only just before the
3325   // branch instruction, as the location will be remembered for patching
3326   // the target.
3327 {
3328 BlockTrampolinePoolScope block_trampoline_pool(this);
3329 switch (cond) {
3330 case cc_always:
3331 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3332 b(offset32);
3333 break;
3334 case eq:
3335 if (IsZero(rt)) {
3336 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3337 beq(rs, zero_reg, offset32);
3338 } else {
3339 // We don't want any other register but scratch clobbered.
3340 scratch = GetRtAsRegisterHelper(rt, scratch);
3341 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3342 beq(rs, scratch, offset32);
3343 }
3344 break;
3345 case ne:
3346 if (IsZero(rt)) {
3347 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3348 bne(rs, zero_reg, offset32);
3349 } else {
3350 // We don't want any other register but scratch clobbered.
3351 scratch = GetRtAsRegisterHelper(rt, scratch);
3352 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3353 bne(rs, scratch, offset32);
3354 }
3355 break;
3356
3357 // Signed comparison.
3358 case greater:
3359 if (IsZero(rt)) {
3360 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3361 bgtz(rs, offset32);
3362 } else {
3363 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3364 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3365 bne(scratch, zero_reg, offset32);
3366 }
3367 break;
3368 case greater_equal:
3369 if (IsZero(rt)) {
3370 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3371 bgez(rs, offset32);
3372 } else {
3373 Slt(scratch, rs, rt);
3374 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3375 beq(scratch, zero_reg, offset32);
3376 }
3377 break;
3378 case less:
3379 if (IsZero(rt)) {
3380 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3381 bltz(rs, offset32);
3382 } else {
3383 Slt(scratch, rs, rt);
3384 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3385 bne(scratch, zero_reg, offset32);
3386 }
3387 break;
3388 case less_equal:
3389 if (IsZero(rt)) {
3390 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3391 blez(rs, offset32);
3392 } else {
3393 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3394 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3395 beq(scratch, zero_reg, offset32);
3396 }
3397 break;
3398
3399 // Unsigned comparison.
3400 case Ugreater:
3401 if (IsZero(rt)) {
3402 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3403 bne(rs, zero_reg, offset32);
3404 } else {
3405 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3406 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3407 bne(scratch, zero_reg, offset32);
3408 }
3409 break;
3410 case Ugreater_equal:
3411 if (IsZero(rt)) {
3412 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3413 b(offset32);
3414 } else {
3415 Sltu(scratch, rs, rt);
3416 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3417 beq(scratch, zero_reg, offset32);
3418 }
3419 break;
3420 case Uless:
3421 if (IsZero(rt)) {
3422 return true; // No code needs to be emitted.
3423 } else {
3424 Sltu(scratch, rs, rt);
3425 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3426 bne(scratch, zero_reg, offset32);
3427 }
3428 break;
3429 case Uless_equal:
3430 if (IsZero(rt)) {
3431 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3432 beq(rs, zero_reg, offset32);
3433 } else {
3434 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3435 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3436 beq(scratch, zero_reg, offset32);
3437 }
3438 break;
3439 default:
3440 UNREACHABLE();
3441 }
3442 }
3443
3444 // Emit a nop in the branch delay slot if required.
3445 if (bdslot == PROTECT)
3446 nop();
3447
3448 return true;
3449 }
3450
3451
BranchShortCheck(int32_t offset,Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3452 bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3453 Register rs, const Operand& rt,
3454 BranchDelaySlot bdslot) {
3455 BRANCH_ARGS_CHECK(cond, rs, rt);
3456
3457 if (!L) {
3458 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3459 DCHECK(is_int26(offset));
3460 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3461 } else {
3462 DCHECK(is_int16(offset));
3463 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3464 }
3465 } else {
3466 DCHECK(offset == 0);
3467 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3468 return BranchShortHelperR6(0, L, cond, rs, rt);
3469 } else {
3470 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3471 }
3472 }
3473 return false;
3474 }
3475
3476
BranchShort(int32_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3477 void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3478 const Operand& rt, BranchDelaySlot bdslot) {
3479 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3480 }
3481
3482
BranchShort(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3483 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3484 const Operand& rt, BranchDelaySlot bdslot) {
3485 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3486 }
3487
3488
BranchAndLink(int32_t offset,BranchDelaySlot bdslot)3489 void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3490 BranchAndLinkShort(offset, bdslot);
3491 }
3492
3493
BranchAndLink(int32_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3494 void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3495 const Operand& rt, BranchDelaySlot bdslot) {
3496 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3497 DCHECK(is_near);
3498 USE(is_near);
3499 }
3500
3501
BranchAndLink(Label * L,BranchDelaySlot bdslot)3502 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3503 if (L->is_bound()) {
3504 if (is_near_branch(L)) {
3505 BranchAndLinkShort(L, bdslot);
3506 } else {
3507 BranchAndLinkLong(L, bdslot);
3508 }
3509 } else {
3510 if (is_trampoline_emitted()) {
3511 BranchAndLinkLong(L, bdslot);
3512 } else {
3513 BranchAndLinkShort(L, bdslot);
3514 }
3515 }
3516 }
3517
3518
BranchAndLink(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)3519 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3520 const Operand& rt,
3521 BranchDelaySlot bdslot) {
3522 if (L->is_bound()) {
3523 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3524 Label skip;
3525 Condition neg_cond = NegateCondition(cond);
3526 BranchShort(&skip, neg_cond, rs, rt);
3527 BranchAndLinkLong(L, bdslot);
3528 bind(&skip);
3529 }
3530 } else {
3531 if (is_trampoline_emitted()) {
3532 Label skip;
3533 Condition neg_cond = NegateCondition(cond);
3534 BranchShort(&skip, neg_cond, rs, rt);
3535 BranchAndLinkLong(L, bdslot);
3536 bind(&skip);
3537 } else {
3538 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3539 }
3540 }
3541 }
3542
3543
BranchAndLinkShortHelper(int16_t offset,Label * L,BranchDelaySlot bdslot)3544 void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3545 BranchDelaySlot bdslot) {
3546 DCHECK(L == nullptr || offset == 0);
3547 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3548 bal(offset);
3549
3550 // Emit a nop in the branch delay slot if required.
3551 if (bdslot == PROTECT)
3552 nop();
3553 }
3554
3555
BranchAndLinkShortHelperR6(int32_t offset,Label * L)3556 void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3557 DCHECK(L == nullptr || offset == 0);
3558 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3559 balc(offset);
3560 }
3561
3562
BranchAndLinkShort(int32_t offset,BranchDelaySlot bdslot)3563 void MacroAssembler::BranchAndLinkShort(int32_t offset,
3564 BranchDelaySlot bdslot) {
3565 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3566 DCHECK(is_int26(offset));
3567 BranchAndLinkShortHelperR6(offset, nullptr);
3568 } else {
3569 DCHECK(is_int16(offset));
3570 BranchAndLinkShortHelper(offset, nullptr, bdslot);
3571 }
3572 }
3573
3574
BranchAndLinkShort(Label * L,BranchDelaySlot bdslot)3575 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3576 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3577 BranchAndLinkShortHelperR6(0, L);
3578 } else {
3579 BranchAndLinkShortHelper(0, L, bdslot);
3580 }
3581 }
3582
3583
BranchAndLinkShortHelperR6(int32_t offset,Label * L,Condition cond,Register rs,const Operand & rt)3584 bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3585 Condition cond, Register rs,
3586 const Operand& rt) {
3587 DCHECK(L == nullptr || offset == 0);
3588 Register scratch = rs.is(at) ? t8 : at;
3589 OffsetSize bits = OffsetSize::kOffset16;
3590
3591 BlockTrampolinePoolScope block_trampoline_pool(this);
3592 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3593 switch (cond) {
3594 case cc_always:
3595 bits = OffsetSize::kOffset26;
3596 if (!is_near(L, bits)) return false;
3597 offset = GetOffset(offset, L, bits);
3598 balc(offset);
3599 break;
3600 case eq:
3601 if (!is_near(L, bits)) return false;
3602 Subu(scratch, rs, rt);
3603 offset = GetOffset(offset, L, bits);
3604 beqzalc(scratch, offset);
3605 break;
3606 case ne:
3607 if (!is_near(L, bits)) return false;
3608 Subu(scratch, rs, rt);
3609 offset = GetOffset(offset, L, bits);
3610 bnezalc(scratch, offset);
3611 break;
3612
3613 // Signed comparison.
3614 case greater:
3615 // rs > rt
3616 if (rs.code() == rt.rm_.reg_code) {
3617 break; // No code needs to be emitted.
3618 } else if (rs.is(zero_reg)) {
3619 if (!is_near(L, bits)) return false;
3620 scratch = GetRtAsRegisterHelper(rt, scratch);
3621 offset = GetOffset(offset, L, bits);
3622 bltzalc(scratch, offset);
3623 } else if (IsZero(rt)) {
3624 if (!is_near(L, bits)) return false;
3625 offset = GetOffset(offset, L, bits);
3626 bgtzalc(rs, offset);
3627 } else {
3628 if (!is_near(L, bits)) return false;
3629 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3630 offset = GetOffset(offset, L, bits);
3631 bnezalc(scratch, offset);
3632 }
3633 break;
3634 case greater_equal:
3635 // rs >= rt
3636 if (rs.code() == rt.rm_.reg_code) {
3637 bits = OffsetSize::kOffset26;
3638 if (!is_near(L, bits)) return false;
3639 offset = GetOffset(offset, L, bits);
3640 balc(offset);
3641 } else if (rs.is(zero_reg)) {
3642 if (!is_near(L, bits)) return false;
3643 scratch = GetRtAsRegisterHelper(rt, scratch);
3644 offset = GetOffset(offset, L, bits);
3645 blezalc(scratch, offset);
3646 } else if (IsZero(rt)) {
3647 if (!is_near(L, bits)) return false;
3648 offset = GetOffset(offset, L, bits);
3649 bgezalc(rs, offset);
3650 } else {
3651 if (!is_near(L, bits)) return false;
3652 Slt(scratch, rs, rt);
3653 offset = GetOffset(offset, L, bits);
3654 beqzalc(scratch, offset);
3655 }
3656 break;
3657 case less:
3658 // rs < rt
3659 if (rs.code() == rt.rm_.reg_code) {
3660 break; // No code needs to be emitted.
3661 } else if (rs.is(zero_reg)) {
3662 if (!is_near(L, bits)) return false;
3663 scratch = GetRtAsRegisterHelper(rt, scratch);
3664 offset = GetOffset(offset, L, bits);
3665 bgtzalc(scratch, offset);
3666 } else if (IsZero(rt)) {
3667 if (!is_near(L, bits)) return false;
3668 offset = GetOffset(offset, L, bits);
3669 bltzalc(rs, offset);
3670 } else {
3671 if (!is_near(L, bits)) return false;
3672 Slt(scratch, rs, rt);
3673 offset = GetOffset(offset, L, bits);
3674 bnezalc(scratch, offset);
3675 }
3676 break;
3677 case less_equal:
3678 // rs <= r2
3679 if (rs.code() == rt.rm_.reg_code) {
3680 bits = OffsetSize::kOffset26;
3681 if (!is_near(L, bits)) return false;
3682 offset = GetOffset(offset, L, bits);
3683 balc(offset);
3684 } else if (rs.is(zero_reg)) {
3685 if (!is_near(L, bits)) return false;
3686 scratch = GetRtAsRegisterHelper(rt, scratch);
3687 offset = GetOffset(offset, L, bits);
3688 bgezalc(scratch, offset);
3689 } else if (IsZero(rt)) {
3690 if (!is_near(L, bits)) return false;
3691 offset = GetOffset(offset, L, bits);
3692 blezalc(rs, offset);
3693 } else {
3694 if (!is_near(L, bits)) return false;
3695 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3696 offset = GetOffset(offset, L, bits);
3697 beqzalc(scratch, offset);
3698 }
3699 break;
3700
3701
3702 // Unsigned comparison.
3703 case Ugreater:
3704 // rs > r2
3705 if (!is_near(L, bits)) return false;
3706 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3707 offset = GetOffset(offset, L, bits);
3708 bnezalc(scratch, offset);
3709 break;
3710 case Ugreater_equal:
3711 // rs >= rt
3712 if (!is_near(L, bits)) return false;
3713 Sltu(scratch, rs, rt);
3714 offset = GetOffset(offset, L, bits);
3715 beqzalc(scratch, offset);
3716 break;
3717 case Uless:
3718 // rs < rt
3719 if (!is_near(L, bits)) return false;
3720 Sltu(scratch, rs, rt);
3721 offset = GetOffset(offset, L, bits);
3722 bnezalc(scratch, offset);
3723 break;
3724 case Uless_equal:
3725 // rs <= rt
3726 if (!is_near(L, bits)) return false;
3727 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3728 offset = GetOffset(offset, L, bits);
3729 beqzalc(scratch, offset);
3730 break;
3731 default:
3732 UNREACHABLE();
3733 }
3734 return true;
3735 }
3736
3737
3738 // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3739 // with the slt instructions. We could use sub or add instead but we would miss
3740 // overflow cases, so we keep slt and add an intermediate third instruction.
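// For example, with cond == greater_equal the helper below emits, roughly
// (Slt may expand further when rt is an immediate operand):
//   slt    t8, rs, rt      // t8 = 1 if rs < rt, else 0.
//   addiu  t8, t8, -1      // t8 = 0 if rs < rt, else -1.
//   bltzal t8, offset      // Branch and link when rs >= rt.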
3741 bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3742 Condition cond, Register rs,
3743 const Operand& rt,
3744 BranchDelaySlot bdslot) {
3745 DCHECK(L == nullptr || offset == 0);
3746 if (!is_near(L, OffsetSize::kOffset16)) return false;
3747
3748 Register scratch = t8;
3749 BlockTrampolinePoolScope block_trampoline_pool(this);
3750
3751 switch (cond) {
3752 case cc_always:
3753 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3754 bal(offset);
3755 break;
3756 case eq:
3757 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3758 nop();
3759 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3760 bal(offset);
3761 break;
3762 case ne:
3763 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3764 nop();
3765 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3766 bal(offset);
3767 break;
3768
3769 // Signed comparison.
3770 case greater:
3771 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3772 addiu(scratch, scratch, -1);
3773 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3774 bgezal(scratch, offset);
3775 break;
3776 case greater_equal:
3777 Slt(scratch, rs, rt);
3778 addiu(scratch, scratch, -1);
3779 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3780 bltzal(scratch, offset);
3781 break;
3782 case less:
3783 Slt(scratch, rs, rt);
3784 addiu(scratch, scratch, -1);
3785 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3786 bgezal(scratch, offset);
3787 break;
3788 case less_equal:
3789 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3790 addiu(scratch, scratch, -1);
3791 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3792 bltzal(scratch, offset);
3793 break;
3794
3795 // Unsigned comparison.
3796 case Ugreater:
3797 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3798 addiu(scratch, scratch, -1);
3799 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3800 bgezal(scratch, offset);
3801 break;
3802 case Ugreater_equal:
3803 Sltu(scratch, rs, rt);
3804 addiu(scratch, scratch, -1);
3805 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3806 bltzal(scratch, offset);
3807 break;
3808 case Uless:
3809 Sltu(scratch, rs, rt);
3810 addiu(scratch, scratch, -1);
3811 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3812 bgezal(scratch, offset);
3813 break;
3814 case Uless_equal:
3815 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3816 addiu(scratch, scratch, -1);
3817 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3818 bltzal(scratch, offset);
3819 break;
3820
3821 default:
3822 UNREACHABLE();
3823 }
3824
3825 // Emit a nop in the branch delay slot if required.
3826 if (bdslot == PROTECT)
3827 nop();
3828
3829 return true;
3830 }
3831
3832
3833 bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3834 Condition cond, Register rs,
3835 const Operand& rt,
3836 BranchDelaySlot bdslot) {
3837 BRANCH_ARGS_CHECK(cond, rs, rt);
3838
3839 if (!L) {
3840 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3841 DCHECK(is_int26(offset));
3842 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3843 } else {
3844 DCHECK(is_int16(offset));
3845 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3846 }
3847 } else {
3848 DCHECK(offset == 0);
3849 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3850 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3851 } else {
3852 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3853 }
3854 }
3855 return false;
3856 }
3857
3858
3859 void MacroAssembler::Jump(Register target,
3860 Condition cond,
3861 Register rs,
3862 const Operand& rt,
3863 BranchDelaySlot bd) {
3864 BlockTrampolinePoolScope block_trampoline_pool(this);
3865 if (kArchVariant == kMips64r6 && bd == PROTECT) {
3866 if (cond == cc_always) {
3867 jic(target, 0);
3868 } else {
3869 BRANCH_ARGS_CHECK(cond, rs, rt);
3870 Branch(2, NegateCondition(cond), rs, rt);
3871 jic(target, 0);
3872 }
3873 } else {
3874 if (cond == cc_always) {
3875 jr(target);
3876 } else {
3877 BRANCH_ARGS_CHECK(cond, rs, rt);
3878 Branch(2, NegateCondition(cond), rs, rt);
3879 jr(target);
3880 }
3881 // Emit a nop in the branch delay slot if required.
3882 if (bd == PROTECT) nop();
3883 }
3884 }
3885
3886
3887 void MacroAssembler::Jump(intptr_t target,
3888 RelocInfo::Mode rmode,
3889 Condition cond,
3890 Register rs,
3891 const Operand& rt,
3892 BranchDelaySlot bd) {
3893 Label skip;
3894 if (cond != cc_always) {
3895 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3896 }
3897 // The first instruction of 'li' may be placed in the delay slot.
3898 // This is not an issue, t9 is expected to be clobbered anyway.
3899 li(t9, Operand(target, rmode));
3900 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3901 bind(&skip);
3902 }
3903
3904
3905 void MacroAssembler::Jump(Address target,
3906 RelocInfo::Mode rmode,
3907 Condition cond,
3908 Register rs,
3909 const Operand& rt,
3910 BranchDelaySlot bd) {
3911 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3912 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3913 }
3914
3915
3916 void MacroAssembler::Jump(Handle<Code> code,
3917 RelocInfo::Mode rmode,
3918 Condition cond,
3919 Register rs,
3920 const Operand& rt,
3921 BranchDelaySlot bd) {
3922 DCHECK(RelocInfo::IsCodeTarget(rmode));
3923 AllowDeferredHandleDereference embedding_raw_address;
3924 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3925 }
3926
3927
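// CallSize returns the size, in bytes, of the code that the matching Call()
// below emits. For example, on pre-r6 with a PROTECTed delay slot an
// unconditional Call(reg) is jalr + nop (2 * kInstrSize), while a conditional
// call is branch + nop + jalr + nop (4 * kInstrSize).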
3928 int MacroAssembler::CallSize(Register target,
3929 Condition cond,
3930 Register rs,
3931 const Operand& rt,
3932 BranchDelaySlot bd) {
3933 int size = 0;
3934
3935 if (cond == cc_always) {
3936 size += 1;
3937 } else {
3938 size += 3;
3939 }
3940
3941 if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
3942
3943 return size * kInstrSize;
3944 }
3945
3946
3947 // Note: To call gcc-compiled C code on mips, you must call through t9.
3948 void MacroAssembler::Call(Register target,
3949 Condition cond,
3950 Register rs,
3951 const Operand& rt,
3952 BranchDelaySlot bd) {
3953 #ifdef DEBUG
3954 int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3955 #endif
3956
3957 BlockTrampolinePoolScope block_trampoline_pool(this);
3958 Label start;
3959 bind(&start);
3960 if (kArchVariant == kMips64r6 && bd == PROTECT) {
3961 if (cond == cc_always) {
3962 jialc(target, 0);
3963 } else {
3964 BRANCH_ARGS_CHECK(cond, rs, rt);
3965 Branch(2, NegateCondition(cond), rs, rt);
3966 jialc(target, 0);
3967 }
3968 } else {
3969 if (cond == cc_always) {
3970 jalr(target);
3971 } else {
3972 BRANCH_ARGS_CHECK(cond, rs, rt);
3973 Branch(2, NegateCondition(cond), rs, rt);
3974 jalr(target);
3975 }
3976 // Emit a nop in the branch delay slot if required.
3977 if (bd == PROTECT) nop();
3978 }
3979
3980 #ifdef DEBUG
3981 CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3982 SizeOfCodeGeneratedSince(&start));
3983 #endif
3984 }
3985
3986
3987 int MacroAssembler::CallSize(Address target,
3988 RelocInfo::Mode rmode,
3989 Condition cond,
3990 Register rs,
3991 const Operand& rt,
3992 BranchDelaySlot bd) {
3993 int size = CallSize(t9, cond, rs, rt, bd);
3994 return size + 4 * kInstrSize;
3995 }
3996
3997
3998 void MacroAssembler::Call(Address target,
3999 RelocInfo::Mode rmode,
4000 Condition cond,
4001 Register rs,
4002 const Operand& rt,
4003 BranchDelaySlot bd) {
4004 BlockTrampolinePoolScope block_trampoline_pool(this);
4005 Label start;
4006 bind(&start);
4007 int64_t target_int = reinterpret_cast<int64_t>(target);
4008 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
4009 Call(t9, cond, rs, rt, bd);
4010 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
4011 SizeOfCodeGeneratedSince(&start));
4012 }
4013
4014
4015 int MacroAssembler::CallSize(Handle<Code> code,
4016 RelocInfo::Mode rmode,
4017 TypeFeedbackId ast_id,
4018 Condition cond,
4019 Register rs,
4020 const Operand& rt,
4021 BranchDelaySlot bd) {
4022 AllowDeferredHandleDereference using_raw_address;
4023 return CallSize(reinterpret_cast<Address>(code.location()),
4024 rmode, cond, rs, rt, bd);
4025 }
4026
4027
4028 void MacroAssembler::Call(Handle<Code> code,
4029 RelocInfo::Mode rmode,
4030 TypeFeedbackId ast_id,
4031 Condition cond,
4032 Register rs,
4033 const Operand& rt,
4034 BranchDelaySlot bd) {
4035 BlockTrampolinePoolScope block_trampoline_pool(this);
4036 Label start;
4037 bind(&start);
4038 DCHECK(RelocInfo::IsCodeTarget(rmode));
4039 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
4040 SetRecordedAstId(ast_id);
4041 rmode = RelocInfo::CODE_TARGET_WITH_ID;
4042 }
4043 AllowDeferredHandleDereference embedding_raw_address;
4044 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
4045 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
4046 SizeOfCodeGeneratedSince(&start));
4047 }
4048
4049
4050 void MacroAssembler::Ret(Condition cond,
4051 Register rs,
4052 const Operand& rt,
4053 BranchDelaySlot bd) {
4054 Jump(ra, cond, rs, rt, bd);
4055 }
4056
4057
4058 void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
4059 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4060 (!L->is_bound() || is_near_r6(L))) {
4061 BranchShortHelperR6(0, L);
4062 } else {
4063 EmitForbiddenSlotInstruction();
4064 BlockTrampolinePoolScope block_trampoline_pool(this);
4065 {
4066 BlockGrowBufferScope block_buf_growth(this);
4067 // Buffer growth (and relocation) must be blocked for internal references
4068 // until associated instructions are emitted and available to be patched.
4069 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
4070 j(L);
4071 }
4072 // Emit a nop in the branch delay slot if required.
4073 if (bdslot == PROTECT) nop();
4074 }
4075 }
4076
4077
4078 void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
4079 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4080 (!L->is_bound() || is_near_r6(L))) {
4081 BranchAndLinkShortHelperR6(0, L);
4082 } else {
4083 EmitForbiddenSlotInstruction();
4084 BlockTrampolinePoolScope block_trampoline_pool(this);
4085 {
4086 BlockGrowBufferScope block_buf_growth(this);
4087 // Buffer growth (and relocation) must be blocked for internal references
4088 // until associated instructions are emitted and available to be patched.
4089 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
4090 jal(L);
4091 }
4092 // Emit a nop in the branch delay slot if required.
4093 if (bdslot == PROTECT) nop();
4094 }
4095 }
4096
4097
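// Drops |drop| stack slots and returns; the stack adjustment is placed in the
// branch delay slot of the return, so e.g. DropAndRet(2) emits just
//   jr ra ; daddiu sp, sp, 16   (with kPointerSize == 8).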
4098 void MacroAssembler::DropAndRet(int drop) {
4099 DCHECK(is_int16(drop * kPointerSize));
4100 Ret(USE_DELAY_SLOT);
4101 daddiu(sp, sp, drop * kPointerSize);
4102 }
4103
4104 void MacroAssembler::DropAndRet(int drop,
4105 Condition cond,
4106 Register r1,
4107 const Operand& r2) {
4108 // Both Drop and Ret need to be conditional.
4109 Label skip;
4110 if (cond != cc_always) {
4111 Branch(&skip, NegateCondition(cond), r1, r2);
4112 }
4113
4114 Drop(drop);
4115 Ret();
4116
4117 if (cond != cc_always) {
4118 bind(&skip);
4119 }
4120 }
4121
4122
4123 void MacroAssembler::Drop(int count,
4124 Condition cond,
4125 Register reg,
4126 const Operand& op) {
4127 if (count <= 0) {
4128 return;
4129 }
4130
4131 Label skip;
4132
4133 if (cond != al) {
4134 Branch(&skip, NegateCondition(cond), reg, op);
4135 }
4136
4137 Daddu(sp, sp, Operand(count * kPointerSize));
4138
4139 if (cond != al) {
4140 bind(&skip);
4141 }
4142 }
4143
4144
4145
4146 void MacroAssembler::Swap(Register reg1,
4147 Register reg2,
4148 Register scratch) {
4149 if (scratch.is(no_reg)) {
4150 Xor(reg1, reg1, Operand(reg2));
4151 Xor(reg2, reg2, Operand(reg1));
4152 Xor(reg1, reg1, Operand(reg2));
4153 } else {
4154 mov(scratch, reg1);
4155 mov(reg1, reg2);
4156 mov(reg2, scratch);
4157 }
4158 }
4159
4160
4161 void MacroAssembler::Call(Label* target) {
4162 BranchAndLink(target);
4163 }
4164
4165
4166 void MacroAssembler::Push(Handle<Object> handle) {
4167 li(at, Operand(handle));
4168 push(at);
4169 }
4170
4171
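// Splits the 64-bit value in |src| into two 32-bit halves and pushes each as
// a Smi (payload in the upper 32 bits): the high half first, the low half on
// top. PopRegisterAsTwoSmis below reverses this. |src| is clobbered.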
4172 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
4173 DCHECK(!src.is(scratch));
4174 mov(scratch, src);
4175 dsrl32(src, src, 0);
4176 dsll32(src, src, 0);
4177 push(src);
4178 dsll32(scratch, scratch, 0);
4179 push(scratch);
4180 }
4181
4182
4183 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
4184 DCHECK(!dst.is(scratch));
4185 pop(scratch);
4186 dsrl32(scratch, scratch, 0);
4187 pop(dst);
4188 dsrl32(dst, dst, 0);
4189 dsll32(dst, dst, 0);
4190 or_(dst, dst, scratch);
4191 }
4192
4193
4194 void MacroAssembler::DebugBreak() {
4195 PrepareCEntryArgs(0);
4196 PrepareCEntryFunction(
4197 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
4198 CEntryStub ces(isolate(), 1);
4199 DCHECK(AllowThisStubCall(&ces));
4200 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
4201 }
4202
4203
4204 // ---------------------------------------------------------------------------
4205 // Exception handling.
4206
4207 void MacroAssembler::PushStackHandler() {
4208 // Adjust this code if the stack handler layout asserted below changes.
4209 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
4210 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
4211
4212 // Link the current handler as the next handler.
4213 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4214 ld(a5, MemOperand(a6));
4215 push(a5);
4216
4217 // Set this new handler as the current one.
4218 sd(sp, MemOperand(a6));
4219 }
4220
4221
4222 void MacroAssembler::PopStackHandler() {
4223 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4224 pop(a1);
4225 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
4226 kPointerSize)));
4227 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4228 sd(a1, MemOperand(at));
4229 }
4230
4231
4232 void MacroAssembler::Allocate(int object_size,
4233 Register result,
4234 Register scratch1,
4235 Register scratch2,
4236 Label* gc_required,
4237 AllocationFlags flags) {
4238 DCHECK(object_size <= kMaxRegularHeapObjectSize);
4239 if (!FLAG_inline_new) {
4240 if (emit_debug_code()) {
4241 // Trash the registers to simulate an allocation failure.
4242 li(result, 0x7091);
4243 li(scratch1, 0x7191);
4244 li(scratch2, 0x7291);
4245 }
4246 jmp(gc_required);
4247 return;
4248 }
4249
4250 DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
4251
4252 // Make object size into bytes.
4253 if ((flags & SIZE_IN_WORDS) != 0) {
4254 object_size *= kPointerSize;
4255 }
4256 DCHECK(0 == (object_size & kObjectAlignmentMask));
4257
4258 // Check relative positions of allocation top and limit addresses.
4259 // ARM adds additional checks to make sure the ldm instruction can be
4260 // used. On MIPS we don't have ldm so we don't need additional checks either.
4261 ExternalReference allocation_top =
4262 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4263 ExternalReference allocation_limit =
4264 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4265
4266 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4267 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4268 DCHECK((limit - top) == kPointerSize);
4269
4270 // Set up allocation top address and allocation limit registers.
4271 Register top_address = scratch1;
4272 // This code stores a temporary value in t9.
4273 Register alloc_limit = t9;
4274 Register result_end = scratch2;
4275 li(top_address, Operand(allocation_top));
4276
4277 if ((flags & RESULT_CONTAINS_TOP) == 0) {
4278 // Load allocation top into result and allocation limit into alloc_limit.
4279 ld(result, MemOperand(top_address));
4280 ld(alloc_limit, MemOperand(top_address, kPointerSize));
4281 } else {
4282 if (emit_debug_code()) {
4283 // Assert that result actually contains top on entry.
4284 ld(alloc_limit, MemOperand(top_address));
4285 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4286 }
4287 // Load allocation limit. Result already contains allocation top.
4288 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
4289 }
4290
4291 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4292 // the same alignment on MIPS64.
4293 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4294
4295 if (emit_debug_code()) {
4296 And(at, result, Operand(kDoubleAlignmentMask));
4297 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4298 }
4299
4300 // Calculate new top and bail out if new space is exhausted. Use result
4301 // to calculate the new top.
4302 Daddu(result_end, result, Operand(object_size));
4303 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4304
4305 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4306 // The top pointer is not updated for allocation folding dominators.
4307 sd(result_end, MemOperand(top_address));
4308 }
4309
4310 // Tag object.
4311 Daddu(result, result, Operand(kHeapObjectTag));
4312 }
4313
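// A typical use of the fixed-size Allocate above (cf. AllocateHeapNumber
// further down):
//   Allocate(HeapNumber::kSize, result, scratch1, scratch2, &gc_required,
//            NO_ALLOCATION_FLAGS);
// leaves a tagged pointer to the new object in |result|, or jumps to
// |gc_required| when new space is exhausted.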
4314
4315 void MacroAssembler::Allocate(Register object_size, Register result,
4316 Register result_end, Register scratch,
4317 Label* gc_required, AllocationFlags flags) {
4318 if (!FLAG_inline_new) {
4319 if (emit_debug_code()) {
4320 // Trash the registers to simulate an allocation failure.
4321 li(result, 0x7091);
4322 li(scratch, 0x7191);
4323 li(result_end, 0x7291);
4324 }
4325 jmp(gc_required);
4326 return;
4327 }
4328
4329 // |object_size| and |result_end| may overlap, other registers must not.
4330 DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4331 DCHECK(!AreAliased(result_end, result, scratch, t9, at));
4332
4333 // Check relative positions of allocation top and limit addresses.
4334 // ARM adds additional checks to make sure the ldm instruction can be
4335 // used. On MIPS we don't have ldm so we don't need additional checks either.
4336 ExternalReference allocation_top =
4337 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4338 ExternalReference allocation_limit =
4339 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4340 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4341 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4342 DCHECK((limit - top) == kPointerSize);
4343
4344 // Set up allocation top address and object size registers.
4345 Register top_address = scratch;
4346 // This code stores a temporary value in t9.
4347 Register alloc_limit = t9;
4348 li(top_address, Operand(allocation_top));
4349
4350 if ((flags & RESULT_CONTAINS_TOP) == 0) {
4351 // Load allocation top into result and allocation limit into alloc_limit.
4352 ld(result, MemOperand(top_address));
4353 ld(alloc_limit, MemOperand(top_address, kPointerSize));
4354 } else {
4355 if (emit_debug_code()) {
4356 // Assert that result actually contains top on entry.
4357 ld(alloc_limit, MemOperand(top_address));
4358 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4359 }
4360 // Load allocation limit. Result already contains allocation top.
4361 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
4362 }
4363
4364 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4365 // the same alignment on MIPS64.
4366 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4367
4368 if (emit_debug_code()) {
4369 And(at, result, Operand(kDoubleAlignmentMask));
4370 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4371 }
4372
4373 // Calculate new top and bail out if new space is exhausted. Use result
4374 // to calculate the new top. Object size may be in words so a shift is
4375 // required to get the number of bytes.
4376 if ((flags & SIZE_IN_WORDS) != 0) {
4377 Dlsa(result_end, result, object_size, kPointerSizeLog2);
4378 } else {
4379 Daddu(result_end, result, Operand(object_size));
4380 }
4381
4382 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4383
4384 // Update allocation top. result temporarily holds the new top.
4385 if (emit_debug_code()) {
4386 And(at, result_end, Operand(kObjectAlignmentMask));
4387 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
4388 }
4389
4390 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4391 // The top pointer is not updated for allocation folding dominators.
4392 sd(result_end, MemOperand(top_address));
4393 }
4394
4395 // Tag object.
4396 Daddu(result, result, Operand(kHeapObjectTag));
4397 }
4398
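// FastAllocate is a limit-check-free variant of Allocate: it bumps the
// allocation top unconditionally and has no gc_required label, so it may only
// be used when the caller already knows the allocation cannot fail.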
4399 void MacroAssembler::FastAllocate(int object_size, Register result,
4400 Register scratch1, Register scratch2,
4401 AllocationFlags flags) {
4402 DCHECK(object_size <= kMaxRegularHeapObjectSize);
4403 DCHECK(!AreAliased(result, scratch1, scratch2, at));
4404
4405 // Make object size into bytes.
4406 if ((flags & SIZE_IN_WORDS) != 0) {
4407 object_size *= kPointerSize;
4408 }
4409 DCHECK(0 == (object_size & kObjectAlignmentMask));
4410
4411 ExternalReference allocation_top =
4412 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4413
4414 Register top_address = scratch1;
4415 Register result_end = scratch2;
4416 li(top_address, Operand(allocation_top));
4417 ld(result, MemOperand(top_address));
4418
4419 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4420 // the same alignment on MIPS64.
4421 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4422
4423 if (emit_debug_code()) {
4424 And(at, result, Operand(kDoubleAlignmentMask));
4425 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4426 }
4427
4428 // Calculate new top and write it back.
4429 Daddu(result_end, result, Operand(object_size));
4430 sd(result_end, MemOperand(top_address));
4431
4432 Daddu(result, result, Operand(kHeapObjectTag));
4433 }
4434
4435 void MacroAssembler::FastAllocate(Register object_size, Register result,
4436 Register result_end, Register scratch,
4437 AllocationFlags flags) {
4438 // |object_size| and |result_end| may overlap, other registers must not.
4439 DCHECK(!AreAliased(object_size, result, scratch, at));
4440 DCHECK(!AreAliased(result_end, result, scratch, at));
4441
4442 ExternalReference allocation_top =
4443 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4444
4445 // Set up allocation top address and object size registers.
4446 Register top_address = scratch;
4447 li(top_address, Operand(allocation_top));
4448 ld(result, MemOperand(top_address));
4449
4450 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4451 // the same alignment on MIPS64.
4452 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4453
4454 if (emit_debug_code()) {
4455 And(at, result, Operand(kDoubleAlignmentMask));
4456 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4457 }
4458
4459 // Calculate new top and write it back.
4460 if ((flags & SIZE_IN_WORDS) != 0) {
4461 Dlsa(result_end, result, object_size, kPointerSizeLog2);
4462 } else {
4463 Daddu(result_end, result, Operand(object_size));
4464 }
4465
4466 // Update allocation top. result temporarily holds the new top.
4467 if (emit_debug_code()) {
4468 And(at, result_end, Operand(kObjectAlignmentMask));
4469 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
4470 }
4471
4472 Daddu(result, result, Operand(kHeapObjectTag));
4473 }
4474
4475 void MacroAssembler::AllocateTwoByteString(Register result,
4476 Register length,
4477 Register scratch1,
4478 Register scratch2,
4479 Register scratch3,
4480 Label* gc_required) {
4481 // Calculate the number of bytes needed for the characters in the string while
4482 // observing object alignment.
4483 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4484 dsll(scratch1, length, 1); // Length in bytes, not chars.
4485 daddiu(scratch1, scratch1,
4486 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
4487 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4488
4489 // Allocate two-byte string in new space.
4490 Allocate(scratch1, result, scratch2, scratch3, gc_required,
4491 NO_ALLOCATION_FLAGS);
4492
4493 // Set the map, length and hash field.
4494 InitializeNewString(result,
4495 length,
4496 Heap::kStringMapRootIndex,
4497 scratch1,
4498 scratch2);
4499 }
4500
4501
4502 void MacroAssembler::AllocateOneByteString(Register result, Register length,
4503 Register scratch1, Register scratch2,
4504 Register scratch3,
4505 Label* gc_required) {
4506 // Calculate the number of bytes needed for the characters in the string
4507 // while observing object alignment.
4508 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4509 DCHECK(kCharSize == 1);
4510 daddiu(scratch1, length,
4511 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
4512 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4513
4514 // Allocate one-byte string in new space.
4515 Allocate(scratch1, result, scratch2, scratch3, gc_required,
4516 NO_ALLOCATION_FLAGS);
4517
4518 // Set the map, length and hash field.
4519 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
4520 scratch1, scratch2);
4521 }
4522
4523
4524 void MacroAssembler::AllocateTwoByteConsString(Register result,
4525 Register length,
4526 Register scratch1,
4527 Register scratch2,
4528 Label* gc_required) {
4529 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4530 NO_ALLOCATION_FLAGS);
4531 InitializeNewString(result,
4532 length,
4533 Heap::kConsStringMapRootIndex,
4534 scratch1,
4535 scratch2);
4536 }
4537
4538
4539 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
4540 Register scratch1,
4541 Register scratch2,
4542 Label* gc_required) {
4543 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4544 NO_ALLOCATION_FLAGS);
4545
4546 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
4547 scratch1, scratch2);
4548 }
4549
4550
4551 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4552 Register length,
4553 Register scratch1,
4554 Register scratch2,
4555 Label* gc_required) {
4556 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4557 NO_ALLOCATION_FLAGS);
4558
4559 InitializeNewString(result,
4560 length,
4561 Heap::kSlicedStringMapRootIndex,
4562 scratch1,
4563 scratch2);
4564 }
4565
4566
4567 void MacroAssembler::AllocateOneByteSlicedString(Register result,
4568 Register length,
4569 Register scratch1,
4570 Register scratch2,
4571 Label* gc_required) {
4572 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4573 NO_ALLOCATION_FLAGS);
4574
4575 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
4576 scratch1, scratch2);
4577 }
4578
4579
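// A unique name is either an internalized string (both the kIsNotStringMask
// and kIsNotInternalizedMask bits are clear) or a Symbol; any other instance
// type branches to |not_unique_name|.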
4580 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
4581 Label* not_unique_name) {
4582 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4583 Label succeed;
4584 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4585 Branch(&succeed, eq, at, Operand(zero_reg));
4586 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
4587
4588 bind(&succeed);
4589 }
4590
4591
4592 // Allocates a heap number or jumps to the label if the young space is full and
4593 // a scavenge is needed.
4594 void MacroAssembler::AllocateHeapNumber(Register result,
4595 Register scratch1,
4596 Register scratch2,
4597 Register heap_number_map,
4598 Label* need_gc,
4599 MutableMode mode) {
4600 // Allocate an object in the heap for the heap number and tag it as a heap
4601 // object.
4602 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
4603 NO_ALLOCATION_FLAGS);
4604
4605 Heap::RootListIndex map_index = mode == MUTABLE
4606 ? Heap::kMutableHeapNumberMapRootIndex
4607 : Heap::kHeapNumberMapRootIndex;
4608 AssertIsRoot(heap_number_map, map_index);
4609
4610 // Store heap number map in the allocated object.
4611 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
4612 }
4613
4614
4615 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4616 FPURegister value,
4617 Register scratch1,
4618 Register scratch2,
4619 Label* gc_required) {
4620 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4621 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4622 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4623 }
4624
4625
4626 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4627 Register value, Register scratch1,
4628 Register scratch2, Label* gc_required) {
4629 DCHECK(!result.is(constructor));
4630 DCHECK(!result.is(scratch1));
4631 DCHECK(!result.is(scratch2));
4632 DCHECK(!result.is(value));
4633
4634 // Allocate JSValue in new space.
4635 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
4636 NO_ALLOCATION_FLAGS);
4637
4638 // Initialize the JSValue.
4639 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4640 sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4641 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4642 sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4643 sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4644 sd(value, FieldMemOperand(result, JSValue::kValueOffset));
4645 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
4646 }
4647
4648 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4649 Register end_address,
4650 Register filler) {
4651 Label loop, entry;
4652 Branch(&entry);
4653 bind(&loop);
4654 sd(filler, MemOperand(current_address));
4655 Daddu(current_address, current_address, kPointerSize);
4656 bind(&entry);
4657 Branch(&loop, ult, current_address, Operand(end_address));
4658 }
4659
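// Branches to |fail| unless the map's elements kind is FAST_ELEMENTS or
// FAST_HOLEY_ELEMENTS: the smi-only kinds compare at or below the first
// bound, and everything past FAST_HOLEY_ELEMENTS above the second.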
4660 void MacroAssembler::CheckFastObjectElements(Register map,
4661 Register scratch,
4662 Label* fail) {
4663 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4664 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4665 STATIC_ASSERT(FAST_ELEMENTS == 2);
4666 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4667 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4668 Branch(fail, ls, scratch,
4669 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4670 Branch(fail, hi, scratch,
4671 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4672 }
4673
4674
4675 void MacroAssembler::CheckFastSmiElements(Register map,
4676 Register scratch,
4677 Label* fail) {
4678 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4679 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4680 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4681 Branch(fail, hi, scratch,
4682 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4683 }
4684
4685
4686 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
4687 Register key_reg,
4688 Register elements_reg,
4689 Register scratch1,
4690 Register scratch2,
4691 Label* fail,
4692 int elements_offset) {
4693 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
4694 Label smi_value, done;
4695
4696 // Handle smi values specially.
4697 JumpIfSmi(value_reg, &smi_value);
4698
4699 // Ensure that the object is a heap number.
4700 CheckMap(value_reg,
4701 scratch1,
4702 Heap::kHeapNumberMapRootIndex,
4703 fail,
4704 DONT_DO_SMI_CHECK);
4705
4706 // Double value; turn a potential sNaN into a qNaN.
4707 DoubleRegister double_result = f0;
4708 DoubleRegister double_scratch = f2;
4709
4710 ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
4711 Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
4712 FPUCanonicalizeNaN(double_result, double_result);
4713
4714 bind(&smi_value);
4715 // Untag and transfer.
4716 dsrl32(scratch1, value_reg, 0);
4717 mtc1(scratch1, double_scratch);
4718 cvt_d_w(double_result, double_scratch);
4719
4720 bind(&done);
4721 Daddu(scratch1, elements_reg,
4722 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
4723 elements_offset));
4724 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);  // Untag the Smi key and scale by kDoubleSize in a single shift.
4725 Daddu(scratch1, scratch1, scratch2);
4726 // scratch1 is now effective address of the double element.
4727 sdc1(double_result, MemOperand(scratch1, 0));
4728 }
4729
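// Computes fd = fs - ft for singles. If the operands compare unordered (the
// result is NaN), the sign and payload bits of the NaN operand (fs preferred
// when both are NaN) are merged into the quieted result instead of producing
// the default NaN. SubNanPreservePayloadAndSign_d below does the same for
// doubles.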
4730 void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
4731 FPURegister fs,
4732 FPURegister ft) {
4733 FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
4734 Label check_nan, save_payload, done;
4735 Register scratch1 = t8;
4736 Register scratch2 = t9;
4737
4738 sub_s(dest, fs, ft);
4739 // Check if the result of subtraction is NaN.
4740 BranchF32(nullptr, &check_nan, eq, fs, ft);
4741 Branch(USE_DELAY_SLOT, &done);
4742 dest.is(fd) ? nop() : mov_s(fd, dest);
4743
4744 bind(&check_nan);
4745 // Check if first operand is a NaN.
4746 mfc1(scratch1, fs);
4747 BranchF32(nullptr, &save_payload, eq, fs, fs);
4748 // Second operand must be a NaN.
4749 mfc1(scratch1, ft);
4750
4751 bind(&save_payload);
4752 // Preserve the sign bit and NaN payload.
4753 And(scratch1, scratch1,
4754 Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
4755 mfc1(scratch2, dest);
4756 And(scratch2, scratch2, Operand(kSingleNaNMask));
4757 Or(scratch2, scratch2, scratch1);
4758 mtc1(scratch2, fd);
4759
4760 bind(&done);
4761 }
4762
4763 void MacroAssembler::SubNanPreservePayloadAndSign_d(FPURegister fd,
4764 FPURegister fs,
4765 FPURegister ft) {
4766 FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
4767 Label check_nan, save_payload, done;
4768 Register scratch1 = t8;
4769 Register scratch2 = t9;
4770
4771 sub_d(dest, fs, ft);
4772 // Check if the result of subtraction is NaN.
4773 BranchF64(nullptr, &check_nan, eq, fs, ft);
4774 Branch(USE_DELAY_SLOT, &done);
4775 dest.is(fd) ? nop() : mov_d(fd, dest);
4776
4777 bind(&check_nan);
4778 // Check if first operand is a NaN.
4779 dmfc1(scratch1, fs);
4780 BranchF64(nullptr, &save_payload, eq, fs, fs);
4781 // Second operand must be a NaN.
4782 dmfc1(scratch1, ft);
4783
4784 bind(&save_payload);
4785 // Preserve the sign bit and NaN payload.
4786 li(at, Operand(kDoubleSignMask | (1L << kDoubleNaNShift)));
4787 Dsubu(at, at, Operand(1));  // at = kDoubleSignMask | ((1L << kDoubleNaNShift) - 1).
4788 And(scratch1, scratch1, at);
4789 dmfc1(scratch2, dest);
4790 And(scratch2, scratch2, Operand(kDoubleNaNMask));
4791 Or(scratch2, scratch2, scratch1);
4792 dmtc1(scratch2, fd);
4793
4794 bind(&done);
4795 }
4796
4797 void MacroAssembler::CompareMapAndBranch(Register obj,
4798 Register scratch,
4799 Handle<Map> map,
4800 Label* early_success,
4801 Condition cond,
4802 Label* branch_to) {
4803 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4804 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4805 }
4806
4807
4808 void MacroAssembler::CompareMapAndBranch(Register obj_map,
4809 Handle<Map> map,
4810 Label* early_success,
4811 Condition cond,
4812 Label* branch_to) {
4813 Branch(branch_to, cond, obj_map, Operand(map));
4814 }
4815
4816
4817 void MacroAssembler::CheckMap(Register obj,
4818 Register scratch,
4819 Handle<Map> map,
4820 Label* fail,
4821 SmiCheckType smi_check_type) {
4822 if (smi_check_type == DO_SMI_CHECK) {
4823 JumpIfSmi(obj, fail);
4824 }
4825 Label success;
4826 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
4827 bind(&success);
4828 }
4829
4830
4831 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4832 Register scratch2, Handle<WeakCell> cell,
4833 Handle<Code> success,
4834 SmiCheckType smi_check_type) {
4835 Label fail;
4836 if (smi_check_type == DO_SMI_CHECK) {
4837 JumpIfSmi(obj, &fail);
4838 }
4839 ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4840 GetWeakValue(scratch2, cell);
4841 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
4842 bind(&fail);
4843 }
4844
4845
4846 void MacroAssembler::CheckMap(Register obj,
4847 Register scratch,
4848 Heap::RootListIndex index,
4849 Label* fail,
4850 SmiCheckType smi_check_type) {
4851 if (smi_check_type == DO_SMI_CHECK) {
4852 JumpIfSmi(obj, fail);
4853 }
4854 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4855 LoadRoot(at, index);
4856 Branch(fail, ne, scratch, Operand(at));
4857 }
4858
4859
4860 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4861 li(value, Operand(cell));
4862 ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
4863 }
4864
4865 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4866 const DoubleRegister src) {
4867 sub_d(dst, src, kDoubleRegZero);
4868 }
4869
4870 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4871 Label* miss) {
4872 GetWeakValue(value, cell);
4873 JumpIfSmi(value, miss);
4874 }
4875
4876
4877 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
4878 if (IsMipsSoftFloatABI) {
4879 if (kArchEndian == kLittle) {
4880 Move(dst, v0, v1);
4881 } else {
4882 Move(dst, v1, v0);
4883 }
4884 } else {
4885 Move(dst, f0); // Reg f0 is n64 ABI FP return value.
4886 }
4887 }
4888
4889
4890 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4891 if (IsMipsSoftFloatABI) {
4892 if (kArchEndian == kLittle) {
4893 Move(dst, a0, a1);
4894 } else {
4895 Move(dst, a1, a0);
4896 }
4897 } else {
4898 Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
4899 }
4900 }
4901
4902
4903 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4904 if (!IsMipsSoftFloatABI) {
4905 Move(f12, src);
4906 } else {
4907 if (kArchEndian == kLittle) {
4908 Move(a0, a1, src);
4909 } else {
4910 Move(a1, a0, src);
4911 }
4912 }
4913 }
4914
4915
4916 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4917 if (!IsMipsSoftFloatABI) {
4918 Move(f0, src);
4919 } else {
4920 if (kArchEndian == kLittle) {
4921 Move(v0, v1, src);
4922 } else {
4923 Move(v1, v0, src);
4924 }
4925 }
4926 }
4927
4928
4929 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4930 DoubleRegister src2) {
4931 if (!IsMipsSoftFloatABI) {
4932 const DoubleRegister fparg2 = f13;
4933 if (src2.is(f12)) {
4934 DCHECK(!src1.is(fparg2));
4935 Move(fparg2, src2);
4936 Move(f12, src1);
4937 } else {
4938 Move(f12, src1);
4939 Move(fparg2, src2);
4940 }
4941 } else {
4942 if (kArchEndian == kLittle) {
4943 Move(a0, a1, src1);
4944 Move(a2, a3, src2);
4945 } else {
4946 Move(a1, a0, src1);
4947 Move(a3, a2, src2);
4948 }
4949 }
4950 }
4951
4952
4953 // -----------------------------------------------------------------------------
4954 // JavaScript invokes.
4955
4956 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
4957 Register caller_args_count_reg,
4958 Register scratch0, Register scratch1) {
4959 #if DEBUG
4960 if (callee_args_count.is_reg()) {
4961 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
4962 scratch1));
4963 } else {
4964 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
4965 }
4966 #endif
4967
4968 // Calculate the end of the destination area where we will put the arguments
4969 // after we drop the current frame. We add kPointerSize to count the receiver
4970 // argument, which is not included in the formal parameter count.
4971 Register dst_reg = scratch0;
4972 Dlsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
4973 Daddu(dst_reg, dst_reg,
4974 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
4975
4976 Register src_reg = caller_args_count_reg;
4977 // Calculate the end of source area. +kPointerSize is for the receiver.
4978 if (callee_args_count.is_reg()) {
4979 Dlsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
4980 Daddu(src_reg, src_reg, Operand(kPointerSize));
4981 } else {
4982 Daddu(src_reg, sp,
4983 Operand((callee_args_count.immediate() + 1) * kPointerSize));
4984 }
4985
4986 if (FLAG_debug_code) {
4987 Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
4988 }
4989
4990 // Restore caller's frame pointer and return address now as they will be
4991 // overwritten by the copying loop.
4992 ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
4993 ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4994
4995 // Now copy callee arguments to the caller frame going backwards to avoid
4996 // callee arguments corruption (source and destination areas could overlap).
4997
4998 // Both src_reg and dst_reg are pointing to the word after the one to copy,
4999 // so they must be pre-decremented in the loop.
5000 Register tmp_reg = scratch1;
5001 Label loop, entry;
5002 Branch(&entry);
5003 bind(&loop);
5004 Dsubu(src_reg, src_reg, Operand(kPointerSize));
5005 Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
5006 ld(tmp_reg, MemOperand(src_reg));
5007 sd(tmp_reg, MemOperand(dst_reg));
5008 bind(&entry);
5009 Branch(&loop, ne, sp, Operand(src_reg));
5010
5011 // Leave current frame.
5012 mov(sp, dst_reg);
5013 }
5014
5015 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
5016 const ParameterCount& actual,
5017 Label* done,
5018 bool* definitely_mismatches,
5019 InvokeFlag flag,
5020 const CallWrapper& call_wrapper) {
5021 bool definitely_matches = false;
5022 *definitely_mismatches = false;
5023 Label regular_invoke;
5024
5025 // Check whether the expected and actual arguments count match. If not,
5026 // setup registers according to contract with ArgumentsAdaptorTrampoline:
5027 // a0: actual arguments count
5028 // a1: function (passed through to callee)
5029 // a2: expected arguments count
5030
5031 // The code below is made a lot easier because the calling code already sets
5032 // up actual and expected registers according to the contract if values are
5033 // passed in registers.
5034 DCHECK(actual.is_immediate() || actual.reg().is(a0));
5035 DCHECK(expected.is_immediate() || expected.reg().is(a2));
5036
5037 if (expected.is_immediate()) {
5038 DCHECK(actual.is_immediate());
5039 li(a0, Operand(actual.immediate()));
5040 if (expected.immediate() == actual.immediate()) {
5041 definitely_matches = true;
5042 } else {
5043 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
5044 if (expected.immediate() == sentinel) {
5045 // Don't worry about adapting arguments for builtins that
5046 // don't want that done. Skip the adaptation code by making it look
5047 // like we have a match between expected and actual number of
5048 // arguments.
5049 definitely_matches = true;
5050 } else {
5051 *definitely_mismatches = true;
5052 li(a2, Operand(expected.immediate()));
5053 }
5054 }
5055 } else if (actual.is_immediate()) {
5056 li(a0, Operand(actual.immediate()));
5057 Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
5058 } else {
5059 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
5060 }
5061
5062 if (!definitely_matches) {
5063 Handle<Code> adaptor =
5064 isolate()->builtins()->ArgumentsAdaptorTrampoline();
5065 if (flag == CALL_FUNCTION) {
5066 call_wrapper.BeforeCall(CallSize(adaptor));
5067 Call(adaptor);
5068 call_wrapper.AfterCall();
5069 if (!*definitely_mismatches) {
5070 Branch(done);
5071 }
5072 } else {
5073 Jump(adaptor, RelocInfo::CODE_TARGET);
5074 }
5075 bind(&regular_invoke);
5076 }
5077 }
5078
5079
5080 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
5081 const ParameterCount& expected,
5082 const ParameterCount& actual) {
5083 Label skip_flooding;
5084 ExternalReference last_step_action =
5085 ExternalReference::debug_last_step_action_address(isolate());
5086 STATIC_ASSERT(StepFrame > StepIn);
5087 li(t0, Operand(last_step_action));
5088 lb(t0, MemOperand(t0));
5089 Branch(&skip_flooding, lt, t0, Operand(StepIn));
5090 {
5091 FrameScope frame(this,
5092 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
5093 if (expected.is_reg()) {
5094 SmiTag(expected.reg());
5095 Push(expected.reg());
5096 }
5097 if (actual.is_reg()) {
5098 SmiTag(actual.reg());
5099 Push(actual.reg());
5100 }
5101 if (new_target.is_valid()) {
5102 Push(new_target);
5103 }
5104 Push(fun);
5105 Push(fun);
5106 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
5107 Pop(fun);
5108 if (new_target.is_valid()) {
5109 Pop(new_target);
5110 }
5111 if (actual.is_reg()) {
5112 Pop(actual.reg());
5113 SmiUntag(actual.reg());
5114 }
5115 if (expected.is_reg()) {
5116 Pop(expected.reg());
5117 SmiUntag(expected.reg());
5118 }
5119 }
5120 bind(&skip_flooding);
5121 }
5122
5123
5124 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
5125 const ParameterCount& expected,
5126 const ParameterCount& actual,
5127 InvokeFlag flag,
5128 const CallWrapper& call_wrapper) {
5129 // You can't call a function without a valid frame.
5130 DCHECK(flag == JUMP_FUNCTION || has_frame());
5131 DCHECK(function.is(a1));
5132 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
5133
5134 if (call_wrapper.NeedsDebugStepCheck()) {
5135 FloodFunctionIfStepping(function, new_target, expected, actual);
5136 }
5137
5138 // Clear the new.target register if not given.
5139 if (!new_target.is_valid()) {
5140 LoadRoot(a3, Heap::kUndefinedValueRootIndex);
5141 }
5142
5143 Label done;
5144 bool definitely_mismatches = false;
5145 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
5146 call_wrapper);
5147 if (!definitely_mismatches) {
5148 // We call indirectly through the code field in the function to
5149 // allow recompilation to take effect without changing any of the
5150 // call sites.
5151 Register code = t0;
5152 ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
5153 if (flag == CALL_FUNCTION) {
5154 call_wrapper.BeforeCall(CallSize(code));
5155 Call(code);
5156 call_wrapper.AfterCall();
5157 } else {
5158 DCHECK(flag == JUMP_FUNCTION);
5159 Jump(code);
5160 }
5161 // Continue here if InvokePrologue does handle the invocation due to
5162 // mismatched parameter counts.
5163 bind(&done);
5164 }
5165 }
5166
5167
5168 void MacroAssembler::InvokeFunction(Register function,
5169 Register new_target,
5170 const ParameterCount& actual,
5171 InvokeFlag flag,
5172 const CallWrapper& call_wrapper) {
5173 // You can't call a function without a valid frame.
5174 DCHECK(flag == JUMP_FUNCTION || has_frame());
5175
5176 // Contract with called JS functions requires that function is passed in a1.
5177 DCHECK(function.is(a1));
5178 Register expected_reg = a2;
5179 Register temp_reg = t0;
5180 ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
5181 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5182 // The argument count is stored as int32_t on 64-bit platforms.
5183 // TODO(plind): Smi on 32-bit platforms.
5184 lw(expected_reg,
5185 FieldMemOperand(temp_reg,
5186 SharedFunctionInfo::kFormalParameterCountOffset));
5187 ParameterCount expected(expected_reg);
5188 InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
5189 }
5190
5191
5192 void MacroAssembler::InvokeFunction(Register function,
5193 const ParameterCount& expected,
5194 const ParameterCount& actual,
5195 InvokeFlag flag,
5196 const CallWrapper& call_wrapper) {
5197 // You can't call a function without a valid frame.
5198 DCHECK(flag == JUMP_FUNCTION || has_frame());
5199
5200 // Contract with called JS functions requires that function is passed in a1.
5201 DCHECK(function.is(a1));
5202
5203 // Get the function and setup the context.
5204 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5205
5206 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
5207 }
5208
5209
5210 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
5211 const ParameterCount& expected,
5212 const ParameterCount& actual,
5213 InvokeFlag flag,
5214 const CallWrapper& call_wrapper) {
5215 li(a1, function);
5216 InvokeFunction(a1, expected, actual, flag, call_wrapper);
5217 }
5218
5219
5220 void MacroAssembler::IsObjectJSStringType(Register object,
5221 Register scratch,
5222 Label* fail) {
5223 DCHECK(kNotStringTag != 0);
5224
5225 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5226 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5227 And(scratch, scratch, Operand(kIsNotStringMask));
5228 Branch(fail, ne, scratch, Operand(zero_reg));
5229 }
5230
5231
5232 void MacroAssembler::IsObjectNameType(Register object,
5233 Register scratch,
5234 Label* fail) {
5235 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5236 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5237 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
5238 }
5239
5240
5241 // ---------------------------------------------------------------------------
5242 // Support functions.
5243
5244
5245 void MacroAssembler::GetMapConstructor(Register result, Register map,
5246 Register temp, Register temp2) {
5247 Label done, loop;
5248 ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
5249 bind(&loop);
5250 JumpIfSmi(result, &done);
5251 GetObjectType(result, temp, temp2);
5252 Branch(&done, ne, temp2, Operand(MAP_TYPE));
5253 ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
5254 Branch(&loop);
5255 bind(&done);
5256 }
5257
5258
5259 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
5260 Register scratch, Label* miss) {
5261 // Get the prototype or initial map from the function.
5262 ld(result,
5263 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5264
5265 // If the prototype or initial map is the hole, don't return it and
5266 // simply miss the cache instead. This will allow us to allocate a
5267 // prototype object on-demand in the runtime system.
5268 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
5269 Branch(miss, eq, result, Operand(t8));
5270
5271 // If the function does not have an initial map, we're done.
5272 Label done;
5273 GetObjectType(result, scratch, scratch);
5274 Branch(&done, ne, scratch, Operand(MAP_TYPE));
5275
5276 // Get the prototype from the initial map.
5277 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
5278
5279 // All done.
5280 bind(&done);
5281 }
5282
5283
GetObjectType(Register object,Register map,Register type_reg)5284 void MacroAssembler::GetObjectType(Register object,
5285 Register map,
5286 Register type_reg) {
5287 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
5288 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5289 }
5290
5291
5292 // -----------------------------------------------------------------------------
5293 // Runtime calls.
5294
CallStub(CodeStub * stub,TypeFeedbackId ast_id,Condition cond,Register r1,const Operand & r2,BranchDelaySlot bd)5295 void MacroAssembler::CallStub(CodeStub* stub,
5296 TypeFeedbackId ast_id,
5297 Condition cond,
5298 Register r1,
5299 const Operand& r2,
5300 BranchDelaySlot bd) {
5301 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
5302 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
5303 cond, r1, r2, bd);
5304 }
5305
5306
TailCallStub(CodeStub * stub,Condition cond,Register r1,const Operand & r2,BranchDelaySlot bd)5307 void MacroAssembler::TailCallStub(CodeStub* stub,
5308 Condition cond,
5309 Register r1,
5310 const Operand& r2,
5311 BranchDelaySlot bd) {
5312 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
5313 }
5314
5315
AllowThisStubCall(CodeStub * stub)5316 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
5317 return has_frame_ || !stub->SometimesSetsUpAFrame();
5318 }
5319
ObjectToDoubleFPURegister(Register object,FPURegister result,Register scratch1,Register scratch2,Register heap_number_map,Label * not_number,ObjectToDoubleFlags flags)5320 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
5321 FPURegister result,
5322 Register scratch1,
5323 Register scratch2,
5324 Register heap_number_map,
5325 Label* not_number,
5326 ObjectToDoubleFlags flags) {
5327 Label done;
5328 if ((flags & OBJECT_NOT_SMI) == 0) {
5329 Label not_smi;
5330     JumpIfNotSmi(object, &not_smi);
5331 // Remove smi tag and convert to double.
5332 // dsra(scratch1, object, kSmiTagSize);
5333 dsra32(scratch1, object, 0);
5334 mtc1(scratch1, result);
5335 cvt_d_w(result, result);
5336 Branch(&done);
5337     bind(&not_smi);
5338 }
5339 // Check for heap number and load double value from it.
5340 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
5341 Branch(not_number, ne, scratch1, Operand(heap_number_map));
5342
5343 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
5344 // If exponent is all ones the number is either a NaN or +/-Infinity.
5345 Register exponent = scratch1;
5346 Register mask_reg = scratch2;
5347 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
5348 li(mask_reg, HeapNumber::kExponentMask);
5349
5350 And(exponent, exponent, mask_reg);
5351 Branch(not_number, eq, exponent, Operand(mask_reg));
5352 }
5353 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
5354 bind(&done);
5355 }
5356
5357
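// With 32-bit Smis on MIPS64 the payload lives in the upper 32 bits of the
// tagged word, so dsra32(reg, smi, 0) both untags and sign-extends in one
// instruction. For example, the Smi encoding of 7 is 0x0000000700000000;
// shifting it right arithmetically by 32 yields 7, ready for mtc1/cvt_d_w.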
SmiToDoubleFPURegister(Register smi,FPURegister value,Register scratch1)5358 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
5359 FPURegister value,
5360 Register scratch1) {
5361 dsra32(scratch1, smi, 0);
5362 mtc1(scratch1, value);
5363 cvt_d_w(value, value);
5364 }
5365
BranchOvfHelper(MacroAssembler * masm,Register overflow_dst,Label * overflow_label,Label * no_overflow_label)5366 static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
5367 Label* overflow_label,
5368 Label* no_overflow_label) {
5369 DCHECK(overflow_label || no_overflow_label);
5370 if (!overflow_label) {
5371 DCHECK(no_overflow_label);
5372 masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
5373 } else {
5374 masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
5375 if (no_overflow_label) masm->Branch(no_overflow_label);
5376 }
5377 }
5378
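// The pre-r6 paths below use the classic sign-bit identity for signed
// overflow: an addition overflowed iff
//   ((left ^ result) & (right ^ result)) < 0,
// and a subtraction iff ((left ^ right) & (left ^ result)) < 0.
// BranchOvfHelper then branches on the sign of that combined value.
// Example: 0x7fffffff + 1 wraps to 0x80000000; left^result = 0xffffffff and
// right^result = 0x80000001, whose AND has the sign bit set, so the
// overflow label is taken.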
AddBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5379 void MacroAssembler::AddBranchOvf(Register dst, Register left,
5380 const Operand& right, Label* overflow_label,
5381 Label* no_overflow_label, Register scratch) {
5382 if (right.is_reg()) {
5383 AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5384 scratch);
5385 } else {
5386 if (kArchVariant == kMips64r6) {
5387 Register right_reg = t9;
5388 DCHECK(!left.is(right_reg));
5389 li(right_reg, Operand(right));
5390 AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
5391 } else {
5392 Register overflow_dst = t9;
5393 DCHECK(!dst.is(scratch));
5394 DCHECK(!dst.is(overflow_dst));
5395 DCHECK(!scratch.is(overflow_dst));
5396 DCHECK(!left.is(overflow_dst));
5397 if (dst.is(left)) {
5398 mov(scratch, left); // Preserve left.
5399 // Left is overwritten.
5400 Addu(dst, left, static_cast<int32_t>(right.immediate()));
5401 xor_(scratch, dst, scratch); // Original left.
5402 // Load right since xori takes uint16 as immediate.
5403 Addu(overflow_dst, zero_reg, right);
5404 xor_(overflow_dst, dst, overflow_dst);
5405 and_(overflow_dst, overflow_dst, scratch);
5406 } else {
5407 Addu(dst, left, static_cast<int32_t>(right.immediate()));
5408 xor_(overflow_dst, dst, left);
5409 // Load right since xori takes uint16 as immediate.
5410 Addu(scratch, zero_reg, right);
5411 xor_(scratch, dst, scratch);
5412 and_(overflow_dst, scratch, overflow_dst);
5413 }
5414 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5415 }
5416 }
5417 }
5418
AddBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5419 void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
5420 Label* overflow_label,
5421 Label* no_overflow_label, Register scratch) {
5422 if (kArchVariant == kMips64r6) {
5423 if (!overflow_label) {
5424 DCHECK(no_overflow_label);
5425 DCHECK(!dst.is(scratch));
5426 Register left_reg = left.is(dst) ? scratch : left;
5427 Register right_reg = right.is(dst) ? t9 : right;
5428 DCHECK(!dst.is(left_reg));
5429 DCHECK(!dst.is(right_reg));
5430 Move(left_reg, left);
5431 Move(right_reg, right);
5432 addu(dst, left, right);
5433 Bnvc(left_reg, right_reg, no_overflow_label);
5434 } else {
5435 Bovc(left, right, overflow_label);
5436 addu(dst, left, right);
5437 if (no_overflow_label) bc(no_overflow_label);
5438 }
5439 } else {
5440 Register overflow_dst = t9;
5441 DCHECK(!dst.is(scratch));
5442 DCHECK(!dst.is(overflow_dst));
5443 DCHECK(!scratch.is(overflow_dst));
5444 DCHECK(!left.is(overflow_dst));
5445 DCHECK(!right.is(overflow_dst));
5446 DCHECK(!left.is(scratch));
5447 DCHECK(!right.is(scratch));
5448
5449 if (left.is(right) && dst.is(left)) {
5450 mov(overflow_dst, right);
5451 right = overflow_dst;
5452 }
5453
5454 if (dst.is(left)) {
5455 mov(scratch, left); // Preserve left.
5456 addu(dst, left, right); // Left is overwritten.
5457 xor_(scratch, dst, scratch); // Original left.
5458 xor_(overflow_dst, dst, right);
5459 and_(overflow_dst, overflow_dst, scratch);
5460 } else if (dst.is(right)) {
5461 mov(scratch, right); // Preserve right.
5462 addu(dst, left, right); // Right is overwritten.
5463 xor_(scratch, dst, scratch); // Original right.
5464 xor_(overflow_dst, dst, left);
5465 and_(overflow_dst, overflow_dst, scratch);
5466 } else {
5467 addu(dst, left, right);
5468 xor_(overflow_dst, dst, left);
5469 xor_(scratch, dst, right);
5470 and_(overflow_dst, scratch, overflow_dst);
5471 }
5472 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5473 }
5474 }
5475
SubBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5476 void MacroAssembler::SubBranchOvf(Register dst, Register left,
5477 const Operand& right, Label* overflow_label,
5478 Label* no_overflow_label, Register scratch) {
5479 DCHECK(overflow_label || no_overflow_label);
5480 if (right.is_reg()) {
5481 SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5482 scratch);
5483 } else {
5484 Register overflow_dst = t9;
5485 DCHECK(!dst.is(scratch));
5486 DCHECK(!dst.is(overflow_dst));
5487 DCHECK(!scratch.is(overflow_dst));
5488 DCHECK(!left.is(overflow_dst));
5489 DCHECK(!left.is(scratch));
5490 if (dst.is(left)) {
5491 mov(scratch, left); // Preserve left.
5492 // Left is overwritten.
5493 Subu(dst, left, static_cast<int32_t>(right.immediate()));
5494 // Load right since xori takes uint16 as immediate.
5495 Addu(overflow_dst, zero_reg, right);
5496 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5497 xor_(scratch, dst, scratch); // scratch is original left.
5498 and_(overflow_dst, scratch, overflow_dst);
5499 } else {
5500 Subu(dst, left, right);
5501 xor_(overflow_dst, dst, left);
5502 // Load right since xori takes uint16 as immediate.
5503 Addu(scratch, zero_reg, right);
5504 xor_(scratch, left, scratch);
5505 and_(overflow_dst, scratch, overflow_dst);
5506 }
5507 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5508 }
5509 }
5510
SubBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5511 void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
5512 Label* overflow_label,
5513 Label* no_overflow_label, Register scratch) {
5514 DCHECK(overflow_label || no_overflow_label);
5515 Register overflow_dst = t9;
5516 DCHECK(!dst.is(scratch));
5517 DCHECK(!dst.is(overflow_dst));
5518 DCHECK(!scratch.is(overflow_dst));
5519 DCHECK(!overflow_dst.is(left));
5520 DCHECK(!overflow_dst.is(right));
5521 DCHECK(!scratch.is(left));
5522 DCHECK(!scratch.is(right));
5523
5524 // This happens with some crankshaft code. Since Subu works fine if
5525 // left == right, let's not make that restriction here.
5526 if (left.is(right)) {
5527 mov(dst, zero_reg);
5528 if (no_overflow_label) {
5529 Branch(no_overflow_label);
5530 }
5531 }
5532
5533 if (dst.is(left)) {
5534 mov(scratch, left); // Preserve left.
5535 subu(dst, left, right); // Left is overwritten.
5536 xor_(overflow_dst, dst, scratch); // scratch is original left.
5537 xor_(scratch, scratch, right); // scratch is original left.
5538 and_(overflow_dst, scratch, overflow_dst);
5539 } else if (dst.is(right)) {
5540 mov(scratch, right); // Preserve right.
5541 subu(dst, left, right); // Right is overwritten.
5542 xor_(overflow_dst, dst, left);
5543 xor_(scratch, left, scratch); // Original right.
5544 and_(overflow_dst, scratch, overflow_dst);
5545 } else {
5546 subu(dst, left, right);
5547 xor_(overflow_dst, dst, left);
5548 xor_(scratch, left, right);
5549 and_(overflow_dst, scratch, overflow_dst);
5550 }
5551 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5552 }
5553
DaddBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5554 void MacroAssembler::DaddBranchOvf(Register dst, Register left,
5555 const Operand& right, Label* overflow_label,
5556 Label* no_overflow_label, Register scratch) {
5557 if (right.is_reg()) {
5558 DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5559 scratch);
5560 } else {
5561 Register overflow_dst = t9;
5562 DCHECK(!dst.is(scratch));
5563 DCHECK(!dst.is(overflow_dst));
5564 DCHECK(!scratch.is(overflow_dst));
5565 DCHECK(!left.is(overflow_dst));
5566 li(overflow_dst, right); // Load right.
5567 if (dst.is(left)) {
5568 mov(scratch, left); // Preserve left.
5569 Daddu(dst, left, overflow_dst); // Left is overwritten.
5570 xor_(scratch, dst, scratch); // Original left.
5571 xor_(overflow_dst, dst, overflow_dst);
5572 and_(overflow_dst, overflow_dst, scratch);
5573 } else {
5574 Daddu(dst, left, overflow_dst);
5575 xor_(scratch, dst, overflow_dst);
5576 xor_(overflow_dst, dst, left);
5577 and_(overflow_dst, scratch, overflow_dst);
5578 }
5579 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5580 }
5581 }
5582
5583
DaddBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5584 void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
5585 Label* overflow_label,
5586 Label* no_overflow_label, Register scratch) {
5587 Register overflow_dst = t9;
5588 DCHECK(!dst.is(scratch));
5589 DCHECK(!dst.is(overflow_dst));
5590 DCHECK(!scratch.is(overflow_dst));
5591 DCHECK(!left.is(overflow_dst));
5592 DCHECK(!right.is(overflow_dst));
5593 DCHECK(!left.is(scratch));
5594 DCHECK(!right.is(scratch));
5595
5596 if (left.is(right) && dst.is(left)) {
5597 mov(overflow_dst, right);
5598 right = overflow_dst;
5599 }
5600
5601 if (dst.is(left)) {
5602 mov(scratch, left); // Preserve left.
5603 daddu(dst, left, right); // Left is overwritten.
5604 xor_(scratch, dst, scratch); // Original left.
5605 xor_(overflow_dst, dst, right);
5606 and_(overflow_dst, overflow_dst, scratch);
5607 } else if (dst.is(right)) {
5608 mov(scratch, right); // Preserve right.
5609 daddu(dst, left, right); // Right is overwritten.
5610 xor_(scratch, dst, scratch); // Original right.
5611 xor_(overflow_dst, dst, left);
5612 and_(overflow_dst, overflow_dst, scratch);
5613 } else {
5614 daddu(dst, left, right);
5615 xor_(overflow_dst, dst, left);
5616 xor_(scratch, dst, right);
5617 and_(overflow_dst, scratch, overflow_dst);
5618 }
5619 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5620 }
5621
5622
DsubBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5623 void MacroAssembler::DsubBranchOvf(Register dst, Register left,
5624 const Operand& right, Label* overflow_label,
5625 Label* no_overflow_label, Register scratch) {
5626 DCHECK(overflow_label || no_overflow_label);
5627 if (right.is_reg()) {
5628 DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5629 scratch);
5630 } else {
5631 Register overflow_dst = t9;
5632 DCHECK(!dst.is(scratch));
5633 DCHECK(!dst.is(overflow_dst));
5634 DCHECK(!scratch.is(overflow_dst));
5635 DCHECK(!left.is(overflow_dst));
5636 DCHECK(!left.is(scratch));
5637 li(overflow_dst, right); // Load right.
5638 if (dst.is(left)) {
5639 mov(scratch, left); // Preserve left.
5640 Dsubu(dst, left, overflow_dst); // Left is overwritten.
5641 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5642 xor_(scratch, dst, scratch); // scratch is original left.
5643 and_(overflow_dst, scratch, overflow_dst);
5644 } else {
5645 Dsubu(dst, left, overflow_dst);
5646 xor_(scratch, left, overflow_dst);
5647 xor_(overflow_dst, dst, left);
5648 and_(overflow_dst, scratch, overflow_dst);
5649 }
5650 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5651 }
5652 }
5653
5654
DsubBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5655 void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
5656 Label* overflow_label,
5657 Label* no_overflow_label, Register scratch) {
5658 DCHECK(overflow_label || no_overflow_label);
5659 Register overflow_dst = t9;
5660 DCHECK(!dst.is(scratch));
5661 DCHECK(!dst.is(overflow_dst));
5662 DCHECK(!scratch.is(overflow_dst));
5663 DCHECK(!overflow_dst.is(left));
5664 DCHECK(!overflow_dst.is(right));
5665 DCHECK(!scratch.is(left));
5666 DCHECK(!scratch.is(right));
5667
5668   // This happens with some crankshaft code. Since Dsubu works fine if
5669   // left == right, let's not make that restriction here.
5670 if (left.is(right)) {
5671 mov(dst, zero_reg);
5672 if (no_overflow_label) {
5673 Branch(no_overflow_label);
5674 }
5675 }
5676
5677 if (dst.is(left)) {
5678 mov(scratch, left); // Preserve left.
5679 dsubu(dst, left, right); // Left is overwritten.
5680 xor_(overflow_dst, dst, scratch); // scratch is original left.
5681 xor_(scratch, scratch, right); // scratch is original left.
5682 and_(overflow_dst, scratch, overflow_dst);
5683 } else if (dst.is(right)) {
5684 mov(scratch, right); // Preserve right.
5685 dsubu(dst, left, right); // Right is overwritten.
5686 xor_(overflow_dst, dst, left);
5687 xor_(scratch, left, scratch); // Original right.
5688 and_(overflow_dst, scratch, overflow_dst);
5689 } else {
5690 dsubu(dst, left, right);
5691 xor_(overflow_dst, dst, left);
5692 xor_(scratch, left, right);
5693 and_(overflow_dst, scratch, overflow_dst);
5694 }
5695 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5696 }
5697
BranchOvfHelperMult(MacroAssembler * masm,Register overflow_dst,Label * overflow_label,Label * no_overflow_label)5698 static inline void BranchOvfHelperMult(MacroAssembler* masm,
5699 Register overflow_dst,
5700 Label* overflow_label,
5701 Label* no_overflow_label) {
5702 DCHECK(overflow_label || no_overflow_label);
5703 if (!overflow_label) {
5704 DCHECK(no_overflow_label);
5705 masm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
5706 } else {
5707 masm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
5708 if (no_overflow_label) masm->Branch(no_overflow_label);
5709 }
5710 }
5711
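// For 32-bit multiplication, Mul produces the (sign-extended) low 32 bits
// and Mulh the high 32 bits of the 64-bit product. If the product fits in an
// int32, the high word equals the sign extension of the low word, so xor-ing
// Mulh's result with dsra32(result, 0) yields zero; any non-zero value
// signals overflow. Example: 0x10000 * 0x10000 has low word 0 and high word
// 1, so the xor is 1 and the overflow label is taken.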
MulBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5712 void MacroAssembler::MulBranchOvf(Register dst, Register left,
5713 const Operand& right, Label* overflow_label,
5714 Label* no_overflow_label, Register scratch) {
5715 DCHECK(overflow_label || no_overflow_label);
5716 if (right.is_reg()) {
5717 MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5718 scratch);
5719 } else {
5720 Register overflow_dst = t9;
5721 DCHECK(!dst.is(scratch));
5722 DCHECK(!dst.is(overflow_dst));
5723 DCHECK(!scratch.is(overflow_dst));
5724 DCHECK(!left.is(overflow_dst));
5725 DCHECK(!left.is(scratch));
5726
5727 if (dst.is(left)) {
5728 Mul(scratch, left, static_cast<int32_t>(right.immediate()));
5729 Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
5730 mov(dst, scratch);
5731 } else {
5732 Mul(dst, left, static_cast<int32_t>(right.immediate()));
5733 Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
5734 }
5735
5736 dsra32(scratch, dst, 0);
5737 xor_(overflow_dst, overflow_dst, scratch);
5738
5739 BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
5740 }
5741 }
5742
MulBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5743 void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
5744 Label* overflow_label,
5745 Label* no_overflow_label, Register scratch) {
5746 DCHECK(overflow_label || no_overflow_label);
5747 Register overflow_dst = t9;
5748 DCHECK(!dst.is(scratch));
5749 DCHECK(!dst.is(overflow_dst));
5750 DCHECK(!scratch.is(overflow_dst));
5751 DCHECK(!overflow_dst.is(left));
5752 DCHECK(!overflow_dst.is(right));
5753 DCHECK(!scratch.is(left));
5754 DCHECK(!scratch.is(right));
5755
5756 if (dst.is(left) || dst.is(right)) {
5757 Mul(scratch, left, right);
5758 Mulh(overflow_dst, left, right);
5759 mov(dst, scratch);
5760 } else {
5761 Mul(dst, left, right);
5762 Mulh(overflow_dst, left, right);
5763 }
5764
5765 dsra32(scratch, dst, 0);
5766 xor_(overflow_dst, overflow_dst, scratch);
5767
5768 BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
5769 }
5770
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles,BranchDelaySlot bd)5771 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
5772 SaveFPRegsMode save_doubles,
5773 BranchDelaySlot bd) {
5774 // All parameters are on the stack. v0 has the return value after call.
5775
5776 // If the expected number of arguments of the runtime function is
5777 // constant, we check that the actual number of arguments match the
5778 // expectation.
5779 CHECK(f->nargs < 0 || f->nargs == num_arguments);
5780
5781 // TODO(1236192): Most runtime routines don't need the number of
5782 // arguments passed in because it is constant. At some point we
5783 // should remove this need and make the runtime routine entry code
5784 // smarter.
5785 PrepareCEntryArgs(num_arguments);
5786 PrepareCEntryFunction(ExternalReference(f, isolate()));
5787 CEntryStub stub(isolate(), 1, save_doubles);
5788 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5789 }
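// The runtime call above funnels through CEntryStub: PrepareCEntryArgs and
// PrepareCEntryFunction load the argument count and the C entry point into
// the registers the stub expects (conventionally a0 and a1 on this port);
// the stub then switches to a C frame and dispatches to the runtime function.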
5790
5791
CallExternalReference(const ExternalReference & ext,int num_arguments,BranchDelaySlot bd)5792 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
5793 int num_arguments,
5794 BranchDelaySlot bd) {
5795 PrepareCEntryArgs(num_arguments);
5796 PrepareCEntryFunction(ext);
5797
5798 CEntryStub stub(isolate(), 1);
5799 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5800 }
5801
5802
TailCallRuntime(Runtime::FunctionId fid)5803 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
5804 const Runtime::Function* function = Runtime::FunctionForId(fid);
5805 DCHECK_EQ(1, function->result_size);
5806 if (function->nargs >= 0) {
5807 PrepareCEntryArgs(function->nargs);
5808 }
5809 JumpToExternalReference(ExternalReference(fid, isolate()));
5810 }
5811
JumpToExternalReference(const ExternalReference & builtin,BranchDelaySlot bd,bool builtin_exit_frame)5812 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5813 BranchDelaySlot bd,
5814 bool builtin_exit_frame) {
5815 PrepareCEntryFunction(builtin);
5816 CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
5817 builtin_exit_frame);
5818 Jump(stub.GetCode(),
5819 RelocInfo::CODE_TARGET,
5820 al,
5821 zero_reg,
5822 Operand(zero_reg),
5823 bd);
5824 }
5825
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5826 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
5827 Register scratch1, Register scratch2) {
5828 if (FLAG_native_code_counters && counter->Enabled()) {
5829 li(scratch1, Operand(value));
5830 li(scratch2, Operand(ExternalReference(counter)));
5831 sw(scratch1, MemOperand(scratch2));
5832 }
5833 }
5834
5835
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5836 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
5837 Register scratch1, Register scratch2) {
5838 DCHECK(value > 0);
5839 if (FLAG_native_code_counters && counter->Enabled()) {
5840 li(scratch2, Operand(ExternalReference(counter)));
5841 lw(scratch1, MemOperand(scratch2));
5842 Addu(scratch1, scratch1, Operand(value));
5843 sw(scratch1, MemOperand(scratch2));
5844 }
5845 }
5846
5847
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5848 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5849 Register scratch1, Register scratch2) {
5850 DCHECK(value > 0);
5851 if (FLAG_native_code_counters && counter->Enabled()) {
5852 li(scratch2, Operand(ExternalReference(counter)));
5853 lw(scratch1, MemOperand(scratch2));
5854 Subu(scratch1, scratch1, Operand(value));
5855 sw(scratch1, MemOperand(scratch2));
5856 }
5857 }
5858
5859
5860 // -----------------------------------------------------------------------------
5861 // Debugging.
5862
Assert(Condition cc,BailoutReason reason,Register rs,Operand rt)5863 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
5864 Register rs, Operand rt) {
5865 if (emit_debug_code())
5866 Check(cc, reason, rs, rt);
5867 }
5868
5869
AssertFastElements(Register elements)5870 void MacroAssembler::AssertFastElements(Register elements) {
5871 if (emit_debug_code()) {
5872 DCHECK(!elements.is(at));
5873 Label ok;
5874 push(elements);
5875 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
5876 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
5877 Branch(&ok, eq, elements, Operand(at));
5878 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
5879 Branch(&ok, eq, elements, Operand(at));
5880 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
5881 Branch(&ok, eq, elements, Operand(at));
5882 Abort(kJSObjectWithFastElementsMapHasSlowElements);
5883 bind(&ok);
5884 pop(elements);
5885 }
5886 }
5887
5888
Check(Condition cc,BailoutReason reason,Register rs,Operand rt)5889 void MacroAssembler::Check(Condition cc, BailoutReason reason,
5890 Register rs, Operand rt) {
5891 Label L;
5892 Branch(&L, cc, rs, rt);
5893 Abort(reason);
5894 // Will not return here.
5895 bind(&L);
5896 }
5897
5898
Abort(BailoutReason reason)5899 void MacroAssembler::Abort(BailoutReason reason) {
5900 Label abort_start;
5901 bind(&abort_start);
5902 #ifdef DEBUG
5903 const char* msg = GetBailoutReason(reason);
5904 if (msg != NULL) {
5905 RecordComment("Abort message: ");
5906 RecordComment(msg);
5907 }
5908
5909 if (FLAG_trap_on_abort) {
5910 stop(msg);
5911 return;
5912 }
5913 #endif
5914
5915 // Check if Abort() has already been initialized.
5916 DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
5917
5918 Move(a0, Smi::FromInt(static_cast<int>(reason)));
5919
5920 // Disable stub call restrictions to always allow calls to abort.
5921 if (!has_frame_) {
5922 // We don't actually want to generate a pile of code for this, so just
5923 // claim there is a stack frame, without generating one.
5924 FrameScope scope(this, StackFrame::NONE);
5925 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
5926 } else {
5927 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
5928 }
5929 // Will not return here.
5930 if (is_trampoline_pool_blocked()) {
5931 // If the calling code cares about the exact number of
5932 // instructions generated, we insert padding here to keep the size
5933 // of the Abort macro constant.
5934 // Currently in debug mode with debug_code enabled the number of
5935 // generated instructions is 10, so we use this as a maximum value.
5936 static const int kExpectedAbortInstructions = 10;
5937 int abort_instructions = InstructionsGeneratedSince(&abort_start);
5938 DCHECK(abort_instructions <= kExpectedAbortInstructions);
5939 while (abort_instructions++ < kExpectedAbortInstructions) {
5940 nop();
5941 }
5942 }
5943 }
5944
5945
LoadContext(Register dst,int context_chain_length)5946 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5947 if (context_chain_length > 0) {
5948 // Move up the chain of contexts to the context containing the slot.
5949 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5950 for (int i = 1; i < context_chain_length; i++) {
5951 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5952 }
5953 } else {
5954 // Slot is in the current function context. Move it into the
5955 // destination register in case we store into it (the write barrier
5956 // cannot be allowed to destroy the context in esi).
5957 Move(dst, cp);
5958 }
5959 }
5960
5961
LoadTransitionedArrayMapConditional(ElementsKind expected_kind,ElementsKind transitioned_kind,Register map_in_out,Register scratch,Label * no_map_match)5962 void MacroAssembler::LoadTransitionedArrayMapConditional(
5963 ElementsKind expected_kind,
5964 ElementsKind transitioned_kind,
5965 Register map_in_out,
5966 Register scratch,
5967 Label* no_map_match) {
5968 DCHECK(IsFastElementsKind(expected_kind));
5969 DCHECK(IsFastElementsKind(transitioned_kind));
5970
5971 // Check that the function's map is the same as the expected cached map.
5972 ld(scratch, NativeContextMemOperand());
5973 ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
5974 Branch(no_map_match, ne, map_in_out, Operand(at));
5975
5976 // Use the transitioned cached map.
5977 ld(map_in_out,
5978 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
5979 }
5980
5981
LoadNativeContextSlot(int index,Register dst)5982 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5983 ld(dst, NativeContextMemOperand());
5984 ld(dst, ContextMemOperand(dst, index));
5985 }
5986
5987
LoadGlobalFunctionInitialMap(Register function,Register map,Register scratch)5988 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5989 Register map,
5990 Register scratch) {
5991 // Load the initial map. The global functions all have initial maps.
5992 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5993 if (emit_debug_code()) {
5994 Label ok, fail;
5995 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
5996 Branch(&ok);
5997 bind(&fail);
5998 Abort(kGlobalFunctionsMustHaveInitialMap);
5999 bind(&ok);
6000 }
6001 }
6002
StubPrologue(StackFrame::Type type)6003 void MacroAssembler::StubPrologue(StackFrame::Type type) {
6004 li(at, Operand(Smi::FromInt(type)));
6005 PushCommonFrame(at);
6006 }
6007
6008
Prologue(bool code_pre_aging)6009 void MacroAssembler::Prologue(bool code_pre_aging) {
6010 PredictableCodeSizeScope predictible_code_size_scope(
6011 this, kNoCodeAgeSequenceLength);
6012 // The following three instructions must remain together and unmodified
6013 // for code aging to work properly.
6014 if (code_pre_aging) {
6015 // Pre-age the code.
6016 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
6017 nop(Assembler::CODE_AGE_MARKER_NOP);
6018 // Load the stub address to t9 and call it,
6019 // GetCodeAgeAndParity() extracts the stub address from this instruction.
6020 li(t9,
6021 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
6022 ADDRESS_LOAD);
6023 nop(); // Prevent jalr to jal optimization.
6024 jalr(t9, a0);
6025 nop(); // Branch delay slot nop.
6026 nop(); // Pad the empty space.
6027 } else {
6028 PushStandardFrame(a1);
6029 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
6030 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
6031 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
6032 }
6033 }
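// The prologue emitted above must always occupy kNoCodeAgeSequenceLength
// instructions: the code-aging machinery later inspects and patches this
// fixed-size sequence, so the young path pads with CODE_AGE_SEQUENCE_NOPs
// and the pre-aged path loads the stub address with a constant-size li.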
6034
EmitLoadTypeFeedbackVector(Register vector)6035 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
6036 ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
6037 ld(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
6038 ld(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
6039 }
6040
6041
EnterFrame(StackFrame::Type type,bool load_constant_pool_pointer_reg)6042 void MacroAssembler::EnterFrame(StackFrame::Type type,
6043 bool load_constant_pool_pointer_reg) {
6044 // Out-of-line constant pool not implemented on mips64.
6045 UNREACHABLE();
6046 }
6047
6048
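// The variant below builds the frame by hand. On exit, fp points at the
// saved caller fp: [fp + kPointerSize] holds the return address,
// [fp - kPointerSize] the frame type Smi, and for INTERNAL frames
// [fp - 2 * kPointerSize] additionally holds the code object.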
EnterFrame(StackFrame::Type type)6049 void MacroAssembler::EnterFrame(StackFrame::Type type) {
6050 int stack_offset, fp_offset;
6051 if (type == StackFrame::INTERNAL) {
6052 stack_offset = -4 * kPointerSize;
6053 fp_offset = 2 * kPointerSize;
6054 } else {
6055 stack_offset = -3 * kPointerSize;
6056 fp_offset = 1 * kPointerSize;
6057 }
6058 daddiu(sp, sp, stack_offset);
6059 stack_offset = -stack_offset - kPointerSize;
6060 sd(ra, MemOperand(sp, stack_offset));
6061 stack_offset -= kPointerSize;
6062 sd(fp, MemOperand(sp, stack_offset));
6063 stack_offset -= kPointerSize;
6064 li(t9, Operand(Smi::FromInt(type)));
6065 sd(t9, MemOperand(sp, stack_offset));
6066 if (type == StackFrame::INTERNAL) {
6067 DCHECK_EQ(stack_offset, kPointerSize);
6068 li(t9, Operand(CodeObject()));
6069 sd(t9, MemOperand(sp, 0));
6070 } else {
6071 DCHECK_EQ(stack_offset, 0);
6072 }
6073 // Adjust FP to point to saved FP.
6074 Daddu(fp, sp, Operand(fp_offset));
6075 }
6076
6077
LeaveFrame(StackFrame::Type type)6078 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
6079 daddiu(sp, fp, 2 * kPointerSize);
6080 ld(ra, MemOperand(fp, 1 * kPointerSize));
6081 ld(fp, MemOperand(fp, 0 * kPointerSize));
6082 }
6083
EnterBuiltinFrame(Register context,Register target,Register argc)6084 void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
6085 Register argc) {
6086 Push(ra, fp);
6087 Move(fp, sp);
6088 Push(context, target, argc);
6089 }
6090
LeaveBuiltinFrame(Register context,Register target,Register argc)6091 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
6092 Register argc) {
6093 Pop(context, target, argc);
6094 Pop(ra, fp);
6095 }
6096
EnterExitFrame(bool save_doubles,int stack_space,StackFrame::Type frame_type)6097 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
6098 StackFrame::Type frame_type) {
6099 DCHECK(frame_type == StackFrame::EXIT ||
6100 frame_type == StackFrame::BUILTIN_EXIT);
6101
6102 // Set up the frame structure on the stack.
6103 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
6104 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
6105 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
6106
6107 // This is how the stack will look:
6108 // fp + 2 (==kCallerSPDisplacement) - old stack's end
6109 // [fp + 1 (==kCallerPCOffset)] - saved old ra
6110 // [fp + 0 (==kCallerFPOffset)] - saved old fp
6111  //   [fp - 1] - StackFrame::EXIT Smi
6112 // [fp - 2 (==kSPOffset)] - sp of the called function
6113 // [fp - 3 (==kCodeOffset)] - CodeObject
6114 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
6115 // new stack (will contain saved ra)
6116
6117 // Save registers and reserve room for saved entry sp and code object.
6118 daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
6119 sd(ra, MemOperand(sp, 4 * kPointerSize));
6120 sd(fp, MemOperand(sp, 3 * kPointerSize));
6121 li(at, Operand(Smi::FromInt(frame_type)));
6122 sd(at, MemOperand(sp, 2 * kPointerSize));
6123 // Set up new frame pointer.
6124 daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
6125
6126 if (emit_debug_code()) {
6127 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
6128 }
6129
6130 // Accessed from ExitFrame::code_slot.
6131 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
6132 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
6133
6134 // Save the frame pointer and the context in top.
6135 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
6136 sd(fp, MemOperand(t8));
6137 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6138 sd(cp, MemOperand(t8));
6139
6140 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
6141 if (save_doubles) {
6142     // The stack is already 8-byte aligned, as required for the sdc1 stores.
6143 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
6144 int space = kNumOfSavedRegisters * kDoubleSize;
6145 Dsubu(sp, sp, Operand(space));
6146 // Remember: we only need to save every 2nd double FPU value.
6147 for (int i = 0; i < kNumOfSavedRegisters; i++) {
6148 FPURegister reg = FPURegister::from_code(2 * i);
6149 sdc1(reg, MemOperand(sp, i * kDoubleSize));
6150 }
6151 }
6152
6153 // Reserve place for the return address, stack space and an optional slot
6154 // (used by the DirectCEntryStub to hold the return value if a struct is
6155 // returned) and align the frame preparing for calling the runtime function.
6156 DCHECK(stack_space >= 0);
6157 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
6158 if (frame_alignment > 0) {
6159 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6160 And(sp, sp, Operand(-frame_alignment)); // Align stack.
6161 }
6162
6163 // Set the exit frame sp value to point just before the return address
6164 // location.
6165 daddiu(at, sp, kPointerSize);
6166 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
6167 }
6168
6169
LeaveExitFrame(bool save_doubles,Register argument_count,bool restore_context,bool do_return,bool argument_count_is_length)6170 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
6171 bool restore_context, bool do_return,
6172 bool argument_count_is_length) {
6173 // Optionally restore all double registers.
6174 if (save_doubles) {
6175 // Remember: we only need to restore every 2nd double FPU value.
6176 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
6177 Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
6178 kNumOfSavedRegisters * kDoubleSize));
6179 for (int i = 0; i < kNumOfSavedRegisters; i++) {
6180 FPURegister reg = FPURegister::from_code(2 * i);
6181 ldc1(reg, MemOperand(t8, i * kDoubleSize));
6182 }
6183 }
6184
6185 // Clear top frame.
6186 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
6187 sd(zero_reg, MemOperand(t8));
6188
6189 // Restore current context from top and clear it in debug mode.
6190 if (restore_context) {
6191 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6192 ld(cp, MemOperand(t8));
6193 }
6194 #ifdef DEBUG
6195 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6196 sd(a3, MemOperand(t8));
6197 #endif
6198
6199 // Pop the arguments, restore registers, and return.
6200 mov(sp, fp); // Respect ABI stack constraint.
6201 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
6202 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
6203
6204 if (argument_count.is_valid()) {
6205 if (argument_count_is_length) {
6206 daddu(sp, sp, argument_count);
6207 } else {
6208 Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
6209 }
6210 }
6211
6212 if (do_return) {
6213 Ret(USE_DELAY_SLOT);
6214     // If returning, the instruction in the delay slot will be the daddiu below.
6215 }
6216 daddiu(sp, sp, 2 * kPointerSize);
6217 }
6218
6219
InitializeNewString(Register string,Register length,Heap::RootListIndex map_index,Register scratch1,Register scratch2)6220 void MacroAssembler::InitializeNewString(Register string,
6221 Register length,
6222 Heap::RootListIndex map_index,
6223 Register scratch1,
6224 Register scratch2) {
6225 // dsll(scratch1, length, kSmiTagSize);
6226 dsll32(scratch1, length, 0);
6227 LoadRoot(scratch2, map_index);
6228 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
6229 li(scratch1, Operand(String::kEmptyHashField));
6230 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
6231 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
6232 }
6233
6234
ActivationFrameAlignment()6235 int MacroAssembler::ActivationFrameAlignment() {
6236 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6237 // Running on the real platform. Use the alignment as mandated by the local
6238 // environment.
6239 // Note: This will break if we ever start generating snapshots on one Mips
6240 // platform for another Mips platform with a different alignment.
6241 return base::OS::ActivationFrameAlignment();
6242 #else  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6243 // If we are using the simulator then we should always align to the expected
6244 // alignment. As the simulator is used to generate snapshots we do not know
6245 // if the target platform will need alignment, so this is controlled from a
6246 // flag.
6247 return FLAG_sim_stack_alignment;
6248 #endif  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6249 }
6250
6251
AssertStackIsAligned()6252 void MacroAssembler::AssertStackIsAligned() {
6253 if (emit_debug_code()) {
6254 const int frame_alignment = ActivationFrameAlignment();
6255 const int frame_alignment_mask = frame_alignment - 1;
6256
6257 if (frame_alignment > kPointerSize) {
6258 Label alignment_as_expected;
6259 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6260 andi(at, sp, frame_alignment_mask);
6261 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6262       // Don't use Check here, as it may call Runtime_Abort and re-enter here.
6263 stop("Unexpected stack alignment");
6264 bind(&alignment_as_expected);
6265 }
6266 }
6267 }
6268
6269
JumpIfNotPowerOfTwoOrZero(Register reg,Register scratch,Label * not_power_of_two_or_zero)6270 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
6271 Register reg,
6272 Register scratch,
6273 Label* not_power_of_two_or_zero) {
6274 Dsubu(scratch, reg, Operand(1));
6275 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
6276 scratch, Operand(zero_reg));
6277 and_(at, scratch, reg); // In the delay slot.
6278 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
6279 }
6280
6281
SmiTagCheckOverflow(Register reg,Register overflow)6282 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
6283 DCHECK(!reg.is(overflow));
6284 mov(overflow, reg); // Save original value.
6285 SmiTag(reg);
6286 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
6287 }
6288
6289
SmiTagCheckOverflow(Register dst,Register src,Register overflow)6290 void MacroAssembler::SmiTagCheckOverflow(Register dst,
6291 Register src,
6292 Register overflow) {
6293 if (dst.is(src)) {
6294 // Fall back to slower case.
6295 SmiTagCheckOverflow(dst, overflow);
6296 } else {
6297 DCHECK(!dst.is(src));
6298 DCHECK(!dst.is(overflow));
6299 DCHECK(!src.is(overflow));
6300 SmiTag(dst, src);
6301 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
6302 }
6303 }
6304
6305
SmiLoadUntag(Register dst,MemOperand src)6306 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
6307 if (SmiValuesAre32Bits()) {
6308 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
6309 } else {
6310 lw(dst, src);
6311 SmiUntag(dst);
6312 }
6313 }
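// UntagSmiMemOperand adjusts the operand's offset so the 32-bit load above
// reads the half of the word that holds the Smi payload directly, saving the
// separate untagging shift that the 31-bit-Smi path needs.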
6314
6315
SmiLoadScale(Register dst,MemOperand src,int scale)6316 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
6317 if (SmiValuesAre32Bits()) {
6318     // TODO(plind): not clear if lw or ld is faster here, need a micro-benchmark.
6319 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
6320 dsll(dst, dst, scale);
6321 } else {
6322 lw(dst, src);
6323 DCHECK(scale >= kSmiTagSize);
6324 sll(dst, dst, scale - kSmiTagSize);
6325 }
6326 }
6327
6328
6329 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
SmiLoadWithScale(Register d_smi,Register d_scaled,MemOperand src,int scale)6330 void MacroAssembler::SmiLoadWithScale(Register d_smi,
6331 Register d_scaled,
6332 MemOperand src,
6333 int scale) {
6334 if (SmiValuesAre32Bits()) {
6335 ld(d_smi, src);
6336 dsra(d_scaled, d_smi, kSmiShift - scale);
6337 } else {
6338 lw(d_smi, src);
6339 DCHECK(scale >= kSmiTagSize);
6340 sll(d_scaled, d_smi, scale - kSmiTagSize);
6341 }
6342 }
6343
6344
6345 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
SmiLoadUntagWithScale(Register d_int,Register d_scaled,MemOperand src,int scale)6346 void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
6347 Register d_scaled,
6348 MemOperand src,
6349 int scale) {
6350 if (SmiValuesAre32Bits()) {
6351 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
6352 dsll(d_scaled, d_int, scale);
6353 } else {
6354 lw(d_int, src);
6355     // Need both the int and the scaled int, so use two instructions.
6356 SmiUntag(d_int);
6357 sll(d_scaled, d_int, scale);
6358 }
6359 }
6360
6361
UntagAndJumpIfSmi(Register dst,Register src,Label * smi_case)6362 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
6363 Register src,
6364 Label* smi_case) {
6365 // DCHECK(!dst.is(src));
6366 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
6367 SmiUntag(dst, src);
6368 }
6369
6370
UntagAndJumpIfNotSmi(Register dst,Register src,Label * non_smi_case)6371 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
6372 Register src,
6373 Label* non_smi_case) {
6374 // DCHECK(!dst.is(src));
6375 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
6376 SmiUntag(dst, src);
6377 }
6378
JumpIfSmi(Register value,Label * smi_label,Register scratch,BranchDelaySlot bd)6379 void MacroAssembler::JumpIfSmi(Register value,
6380 Label* smi_label,
6381 Register scratch,
6382 BranchDelaySlot bd) {
6383 DCHECK_EQ(0, kSmiTag);
6384 andi(scratch, value, kSmiTagMask);
6385 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
6386 }
6387
JumpIfNotSmi(Register value,Label * not_smi_label,Register scratch,BranchDelaySlot bd)6388 void MacroAssembler::JumpIfNotSmi(Register value,
6389 Label* not_smi_label,
6390 Register scratch,
6391 BranchDelaySlot bd) {
6392 DCHECK_EQ(0, kSmiTag);
6393 andi(scratch, value, kSmiTagMask);
6394 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
6395 }
6396
6397
JumpIfNotBothSmi(Register reg1,Register reg2,Label * on_not_both_smi)6398 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
6399 Register reg2,
6400 Label* on_not_both_smi) {
6401 STATIC_ASSERT(kSmiTag == 0);
6402   // TODO(plind): Find some better way to fix this assert issue.
6403 #if defined(__APPLE__)
6404 DCHECK_EQ(1, kSmiTagMask);
6405 #else
6406 DCHECK_EQ((int64_t)1, kSmiTagMask);
6407 #endif
6408 or_(at, reg1, reg2);
6409 JumpIfNotSmi(at, on_not_both_smi);
6410 }
6411
6412
JumpIfEitherSmi(Register reg1,Register reg2,Label * on_either_smi)6413 void MacroAssembler::JumpIfEitherSmi(Register reg1,
6414 Register reg2,
6415 Label* on_either_smi) {
6416 STATIC_ASSERT(kSmiTag == 0);
6417   // TODO(plind): Find some better way to fix this assert issue.
6418 #if defined(__APPLE__)
6419 DCHECK_EQ(1, kSmiTagMask);
6420 #else
6421 DCHECK_EQ((int64_t)1, kSmiTagMask);
6422 #endif
6423   // If either operand is a Smi (tag bit 0), the AND also has tag bit 0.
6424 and_(at, reg1, reg2);
6425 JumpIfSmi(at, on_either_smi);
6426 }
6427
AssertNotNumber(Register object)6428 void MacroAssembler::AssertNotNumber(Register object) {
6429 if (emit_debug_code()) {
6430 STATIC_ASSERT(kSmiTag == 0);
6431 andi(at, object, kSmiTagMask);
6432 Check(ne, kOperandIsANumber, at, Operand(zero_reg));
6433 GetObjectType(object, t8, t8);
6434 Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
6435 }
6436 }
6437
AssertNotSmi(Register object)6438 void MacroAssembler::AssertNotSmi(Register object) {
6439 if (emit_debug_code()) {
6440 STATIC_ASSERT(kSmiTag == 0);
6441 andi(at, object, kSmiTagMask);
6442 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
6443 }
6444 }
6445
6446
AssertSmi(Register object)6447 void MacroAssembler::AssertSmi(Register object) {
6448 if (emit_debug_code()) {
6449 STATIC_ASSERT(kSmiTag == 0);
6450 andi(at, object, kSmiTagMask);
6451 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
6452 }
6453 }
6454
6455
AssertString(Register object)6456 void MacroAssembler::AssertString(Register object) {
6457 if (emit_debug_code()) {
6458 STATIC_ASSERT(kSmiTag == 0);
6459 SmiTst(object, t8);
6460 Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
6461 GetObjectType(object, t8, t8);
6462 Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
6463 }
6464 }
6465
6466
AssertName(Register object)6467 void MacroAssembler::AssertName(Register object) {
6468 if (emit_debug_code()) {
6469 STATIC_ASSERT(kSmiTag == 0);
6470 SmiTst(object, t8);
6471 Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
6472 GetObjectType(object, t8, t8);
6473 Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
6474 }
6475 }
6476
6477
AssertFunction(Register object)6478 void MacroAssembler::AssertFunction(Register object) {
6479 if (emit_debug_code()) {
6480 STATIC_ASSERT(kSmiTag == 0);
6481 SmiTst(object, t8);
6482 Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
6483 GetObjectType(object, t8, t8);
6484 Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
6485 }
6486 }
6487
6488
AssertBoundFunction(Register object)6489 void MacroAssembler::AssertBoundFunction(Register object) {
6490 if (emit_debug_code()) {
6491 STATIC_ASSERT(kSmiTag == 0);
6492 SmiTst(object, t8);
6493 Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
6494 GetObjectType(object, t8, t8);
6495 Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
6496 }
6497 }
6498
AssertGeneratorObject(Register object)6499 void MacroAssembler::AssertGeneratorObject(Register object) {
6500 if (emit_debug_code()) {
6501 STATIC_ASSERT(kSmiTag == 0);
6502 SmiTst(object, t8);
6503 Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
6504 GetObjectType(object, t8, t8);
6505 Check(eq, kOperandIsNotAGeneratorObject, t8,
6506 Operand(JS_GENERATOR_OBJECT_TYPE));
6507 }
6508 }
6509
AssertReceiver(Register object)6510 void MacroAssembler::AssertReceiver(Register object) {
6511 if (emit_debug_code()) {
6512 STATIC_ASSERT(kSmiTag == 0);
6513 SmiTst(object, t8);
6514 Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
6515 GetObjectType(object, t8, t8);
6516 Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
6517 }
6518 }
6519
6520
AssertUndefinedOrAllocationSite(Register object,Register scratch)6521 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
6522 Register scratch) {
6523 if (emit_debug_code()) {
6524 Label done_checking;
6525 AssertNotSmi(object);
6526 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
6527 Branch(&done_checking, eq, object, Operand(scratch));
6528 ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
6529 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
6530 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
6531 bind(&done_checking);
6532 }
6533 }
6534
6535
AssertIsRoot(Register reg,Heap::RootListIndex index)6536 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
6537 if (emit_debug_code()) {
6538 DCHECK(!reg.is(at));
6539 LoadRoot(at, index);
6540 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
6541 }
6542 }
6543
6544
JumpIfNotHeapNumber(Register object,Register heap_number_map,Register scratch,Label * on_not_heap_number)6545 void MacroAssembler::JumpIfNotHeapNumber(Register object,
6546 Register heap_number_map,
6547 Register scratch,
6548 Label* on_not_heap_number) {
6549 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
6550 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
6551 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
6552 }
6553
6554
JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)6555 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
6556 Register first, Register second, Register scratch1, Register scratch2,
6557 Label* failure) {
6558 // Test that both first and second are sequential one-byte strings.
6559 // Assume that they are non-smis.
6560 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
6561 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
6562 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
6563 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
6564
6565 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
6566 scratch2, failure);
6567 }
6568
6569
JumpIfNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)6570 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
6571 Register second,
6572 Register scratch1,
6573 Register scratch2,
6574 Label* failure) {
6575 // Check that neither is a smi.
6576 STATIC_ASSERT(kSmiTag == 0);
6577 And(scratch1, first, Operand(second));
6578 JumpIfSmi(scratch1, failure);
6579 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
6580 scratch2, failure);
6581 }
6582
6583
JumpIfBothInstanceTypesAreNotSequentialOneByte(Register first,Register second,Register scratch1,Register scratch2,Label * failure)6584 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
6585 Register first, Register second, Register scratch1, Register scratch2,
6586 Label* failure) {
6587 const int kFlatOneByteStringMask =
6588 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6589 const int kFlatOneByteStringTag =
6590 kStringTag | kOneByteStringTag | kSeqStringTag;
6591 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
6592 andi(scratch1, first, kFlatOneByteStringMask);
6593 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
6594 andi(scratch2, second, kFlatOneByteStringMask);
6595 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
6596 }
6597
6598
JumpIfInstanceTypeIsNotSequentialOneByte(Register type,Register scratch,Label * failure)6599 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
6600 Register scratch,
6601 Label* failure) {
6602 const int kFlatOneByteStringMask =
6603 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6604 const int kFlatOneByteStringTag =
6605 kStringTag | kOneByteStringTag | kSeqStringTag;
6606 And(scratch, type, Operand(kFlatOneByteStringMask));
6607 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
6608 }
6609
6610 static const int kRegisterPassedArguments = 8;
6611
CalculateStackPassedWords(int num_reg_arguments,int num_double_arguments)6612 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
6613 int num_double_arguments) {
6614 int stack_passed_words = 0;
6615 num_reg_arguments += 2 * num_double_arguments;
6616
6617 // O32: Up to four simple arguments are passed in registers a0..a3.
6618 // N64: Up to eight simple arguments are passed in registers a0..a7.
6619 if (num_reg_arguments > kRegisterPassedArguments) {
6620 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
6621 }
6622 stack_passed_words += kCArgSlotCount;
6623 return stack_passed_words;
6624 }
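// Each double argument counts as two register slots above, so e.g. three
// integer arguments plus four doubles consume 3 + 2 * 4 = 11 slots; with
// eight argument registers that leaves 3 words to pass on the stack, plus
// whatever kCArgSlotCount reserves (zero under the n64 ABI used here).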
6625
6626
EmitSeqStringSetCharCheck(Register string,Register index,Register value,Register scratch,uint32_t encoding_mask)6627 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
6628 Register index,
6629 Register value,
6630 Register scratch,
6631 uint32_t encoding_mask) {
6632 Label is_object;
6633 SmiTst(string, at);
6634 Check(ne, kNonObject, at, Operand(zero_reg));
6635
6636 ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
6637 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
6638
6639 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
6640 li(scratch, Operand(encoding_mask));
6641 Check(eq, kUnexpectedStringType, at, Operand(scratch));
6642
6643 // TODO(plind): requires Smi size check code for mips32.
6644
6645 ld(at, FieldMemOperand(string, String::kLengthOffset));
6646 Check(lt, kIndexIsTooLarge, index, Operand(at));
6647
6648 DCHECK(Smi::kZero == 0);
6649 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
6650 }
6651
6652
PrepareCallCFunction(int num_reg_arguments,int num_double_arguments,Register scratch)6653 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6654 int num_double_arguments,
6655 Register scratch) {
6656 int frame_alignment = ActivationFrameAlignment();
6657
6658   // n64: Up to eight simple arguments are passed in registers a0..a7; no argument slots.
6659 // O32: Up to four simple arguments are passed in registers a0..a3.
6660 // Those four arguments must have reserved argument slots on the stack for
6661 // mips, even though those argument slots are not normally used.
6662 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
6663 // address than) the (O32) argument slots. (arg slot calculation handled by
6664 // CalculateStackPassedWords()).
6665 int stack_passed_arguments = CalculateStackPassedWords(
6666 num_reg_arguments, num_double_arguments);
6667 if (frame_alignment > kPointerSize) {
6668     // Make stack end at alignment and make room for stack_passed_arguments
6669     // words and the original value of sp.
6670 mov(scratch, sp);
6671 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
6672 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6673 And(sp, sp, Operand(-frame_alignment));
6674 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
6675 } else {
6676 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6677 }
6678 }
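// When extra alignment is required, the original sp is stashed in the word
// just above the reserved argument area and sp is then rounded down; the
// matching ld of sp in CallCFunctionHelper restores it with a single load
// after the call returns.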
6679
6680
PrepareCallCFunction(int num_reg_arguments,Register scratch)6681 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6682 Register scratch) {
6683 PrepareCallCFunction(num_reg_arguments, 0, scratch);
6684 }
6685
6686
CallCFunction(ExternalReference function,int num_reg_arguments,int num_double_arguments)6687 void MacroAssembler::CallCFunction(ExternalReference function,
6688 int num_reg_arguments,
6689 int num_double_arguments) {
6690 li(t8, Operand(function));
6691 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
6692 }
6693
6694
CallCFunction(Register function,int num_reg_arguments,int num_double_arguments)6695 void MacroAssembler::CallCFunction(Register function,
6696 int num_reg_arguments,
6697 int num_double_arguments) {
6698 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
6699 }
6700
6701
CallCFunction(ExternalReference function,int num_arguments)6702 void MacroAssembler::CallCFunction(ExternalReference function,
6703 int num_arguments) {
6704 CallCFunction(function, num_arguments, 0);
6705 }
6706
6707
CallCFunction(Register function,int num_arguments)6708 void MacroAssembler::CallCFunction(Register function,
6709 int num_arguments) {
6710 CallCFunction(function, num_arguments, 0);
6711 }
6712
6713
CallCFunctionHelper(Register function,int num_reg_arguments,int num_double_arguments)6714 void MacroAssembler::CallCFunctionHelper(Register function,
6715 int num_reg_arguments,
6716 int num_double_arguments) {
6717 DCHECK(has_frame());
6718 // Make sure that the stack is aligned before calling a C function unless
6719 // running in the simulator. The simulator has its own alignment check which
6720 // provides more information.
6721 // The argument slots are presumed to have been set up by
6722 // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
6723
6724 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6725 if (emit_debug_code()) {
6726 int frame_alignment = base::OS::ActivationFrameAlignment();
6727 int frame_alignment_mask = frame_alignment - 1;
6728 if (frame_alignment > kPointerSize) {
6729 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6730 Label alignment_as_expected;
6731 And(at, sp, Operand(frame_alignment_mask));
6732 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6733 // Don't use Check here, as it will call Runtime_Abort, possibly
6734 // re-entering here.
6735 stop("Unexpected alignment in CallCFunction");
6736 bind(&alignment_as_expected);
6737 }
6738 }
6739 #endif // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6740
6741 // Just call directly. The function called cannot cause a GC or
6742 // allow preemption, so the return address in the link register
6743 // stays correct.
6744
6745 if (!function.is(t9)) {
6746 mov(t9, function);
6747 function = t9;
6748 }
6749
6750 Call(function);
6751
6752 int stack_passed_arguments = CalculateStackPassedWords(
6753 num_reg_arguments, num_double_arguments);
6754
6755 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
6756 ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
6757 } else {
6758 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6759 }
6760 }
6761
6762
6763 #undef BRANCH_ARGS_CHECK
6764
6765
6766 void MacroAssembler::CheckPageFlag(
6767 Register object,
6768 Register scratch,
6769 int mask,
6770 Condition cc,
6771 Label* condition_met) {
6772 And(scratch, object, Operand(~Page::kPageAlignmentMask));
6773 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
6774 And(scratch, scratch, Operand(mask));
6775 Branch(condition_met, cc, scratch, Operand(zero_reg));
6776 }
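// Usage sketch (illustrative only; the flag, registers and label are just
// examples):
//
//   Label done;
//   CheckPageFlag(object, scratch,
//                 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
//   // With cc == eq the branch is taken when the masked flag bits are all
//   // clear, so the fall-through path runs only when the flag is set.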
6777
6778
6779 void MacroAssembler::JumpIfBlack(Register object,
6780 Register scratch0,
6781 Register scratch1,
6782 Label* on_black) {
6783 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
6784 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6785 }
6786
6787
6788 void MacroAssembler::HasColor(Register object,
6789 Register bitmap_scratch,
6790 Register mask_scratch,
6791 Label* has_color,
6792 int first_bit,
6793 int second_bit) {
6794 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
6795 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
6796
6797 GetMarkBits(object, bitmap_scratch, mask_scratch);
6798
6799 Label other_color;
6800 // Note that we are using two 4-byte aligned loads.
6801 LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6802 And(t8, t9, Operand(mask_scratch));
6803 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
6804 // Shift left 1 by adding.
6805 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
6806 And(t8, t9, Operand(mask_scratch));
6807 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
6808
6809 bind(&other_color);
6810 }
6811
6812
6813 void MacroAssembler::GetMarkBits(Register addr_reg,
6814 Register bitmap_reg,
6815 Register mask_reg) {
6816 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
6817 // addr_reg is divided into fields:
6818 // |63 page base 20|19 high 8|7 shift 3|2 0|
6819 // 'high' gives the index of the cell holding color bits for the object.
6820 // 'shift' gives the offset in the cell for this object's color.
6821 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
6822 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
6823 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
6824 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
6825 Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
6826 li(t8, Operand(1));
6827 dsllv(mask_reg, t8, mask_reg);
6828 }
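// Worked example (assuming 8-byte pointers and 32-bit bitmap cells, i.e.
// kPointerSizeLog2 == 3, Bitmap::kBitsPerCellLog2 == 5,
// Bitmap::kBytesPerCellLog2 == 2, and kPageSizeBits == 20 as in the field
// diagram above): for an object at page offset 0x12345,
//   shift      = (0x12345 >> 3) & 31    == 8     -> mask_reg  == 1 << 8
//   cell index = (0x12345 >> 8) & 0xfff == 0x123
//   bitmap_reg = page base + 0x123 * 4 (the Dlsa above); the callers add
//                MemoryChunk::kHeaderSize when the cell is actually loaded.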
6829
6830
6831 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
6832 Register mask_scratch, Register load_scratch,
6833 Label* value_is_white) {
6834 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
6835 GetMarkBits(value, bitmap_scratch, mask_scratch);
6836
6837 // If the value is black or grey we don't need to do anything.
6838 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
6839 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6840 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
6841 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
6842
6843 // Since both black and grey have a 1 in the first position and white does
6844 // not have a 1 there we only need to check one bit.
6845 // Note that we are using a 4-byte aligned 8-byte load.
6846 if (emit_debug_code()) {
6847 LoadWordPair(load_scratch,
6848 MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6849 } else {
6850 lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6851 }
6852 And(t8, mask_scratch, load_scratch);
6853 Branch(value_is_white, eq, t8, Operand(zero_reg));
6854 }
6855
6856
6857 void MacroAssembler::LoadInstanceDescriptors(Register map,
6858 Register descriptors) {
6859 ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
6860 }
6861
6862
6863 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
6864 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
6865 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
6866 }
6867
6868
6869 void MacroAssembler::EnumLength(Register dst, Register map) {
6870 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
6871 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
6872 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
6873 SmiTag(dst);
6874 }
6875
6876
6877 void MacroAssembler::LoadAccessor(Register dst, Register holder,
6878 int accessor_index,
6879 AccessorComponent accessor) {
6880 ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
6881 LoadInstanceDescriptors(dst, dst);
6882 ld(dst,
6883 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
6884 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
6885 : AccessorPair::kSetterOffset;
6886 ld(dst, FieldMemOperand(dst, offset));
6887 }
6888
6889
6890 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
6891 Register null_value = a5;
6892 Register empty_fixed_array_value = a6;
6893 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
6894 Label next, start;
6895 mov(a2, a0);
6896
6897 // Check if the enum length field is properly initialized, indicating that
6898 // there is an enum cache.
6899 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6900
6901 EnumLength(a3, a1);
6902 Branch(
6903 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
6904
6905 LoadRoot(null_value, Heap::kNullValueRootIndex);
6906 jmp(&start);
6907
6908 bind(&next);
6909 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6910
6911 // For all objects but the receiver, check that the cache is empty.
6912 EnumLength(a3, a1);
6913 Branch(call_runtime, ne, a3, Operand(Smi::kZero));
6914
6915 bind(&start);
6916
6917 // Check that there are no elements. Register a2 contains the current JS
6918 // object we've reached through the prototype chain.
6919 Label no_elements;
6920 ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
6921 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
6922
6923 // Second chance, the object may be using the empty slow element dictionary.
6924 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
6925 Branch(call_runtime, ne, a2, Operand(at));
6926
6927 bind(&no_elements);
6928 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
6929 Branch(&next, ne, a2, Operand(null_value));
6930 }
6931
6932
6933 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
6934 DCHECK(!output_reg.is(input_reg));
6935 Label done;
6936 li(output_reg, Operand(255));
6937 // Normal branch: nop in delay slot.
6938 Branch(&done, gt, input_reg, Operand(output_reg));
6939 // Use delay slot in this branch.
6940 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
6941 mov(output_reg, zero_reg); // In delay slot.
6942 mov(output_reg, input_reg); // Value is in range 0..255.
6943 bind(&done);
6944 }
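// The three paths above, spelled out with example inputs:
//   input == 300: first branch taken (300 > 255), output stays 255.
//   input == -5:  second branch taken; its delay slot stores 0 to output.
//   input == 97:  neither branch taken; the delay-slot mov still clears
//                 output, and the final mov then sets it to 97.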
6945
6946
6947 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
6948 DoubleRegister input_reg,
6949 DoubleRegister temp_double_reg) {
6950 Label above_zero;
6951 Label done;
6952 Label in_bounds;
6953
6954 Move(temp_double_reg, 0.0);
6955 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
6956
6957 // Double value is less than or equal to zero, or NaN; return 0.
6958 mov(result_reg, zero_reg);
6959 Branch(&done);
6960
6961 // Double value is greater than 255; return 255.
6962 bind(&above_zero);
6963 Move(temp_double_reg, 255.0);
6964 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
6965 li(result_reg, Operand(255));
6966 Branch(&done);
6967
6968 // In 0-255 range, round and truncate.
6969 bind(&in_bounds);
6970 cvt_w_d(temp_double_reg, input_reg);
6971 mfc1(result_reg, temp_double_reg);
6972 bind(&done);
6973 }
6974
6975 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
6976 Register scratch_reg,
6977 Label* no_memento_found) {
6978 Label map_check;
6979 Label top_check;
6980 ExternalReference new_space_allocation_top_adr =
6981 ExternalReference::new_space_allocation_top_address(isolate());
6982 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
6983 const int kMementoLastWordOffset =
6984 kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
6985
6986 // Bail out if the object is not in new space.
6987 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
6988 // If the object is in new space, we need to check whether it is on the same
6989 // page as the current top.
6990 Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
6991 li(at, Operand(new_space_allocation_top_adr));
6992 ld(at, MemOperand(at));
6993 Xor(scratch_reg, scratch_reg, Operand(at));
6994 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
6995 Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
6996 // The object is on a different page than allocation top. Bail out if the
6997 // object sits on the page boundary as no memento can follow and we cannot
6998 // touch the memory following it.
6999 Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
7000 Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
7001 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
7002 Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
7003 // Continue with the actual map check.
7004 jmp(&map_check);
7005 // If top is on the same page as the current object, we need to check whether
7006 // we are below top.
7007 bind(&top_check);
7008 Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
7009 li(at, Operand(new_space_allocation_top_adr));
7010 ld(at, MemOperand(at));
7011 Branch(no_memento_found, ge, scratch_reg, Operand(at));
7012 // Memento map check.
7013 bind(&map_check);
7014 ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
7015 Branch(no_memento_found, ne, scratch_reg,
7016 Operand(isolate()->factory()->allocation_memento_map()));
7017 }
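// The Xor/And pairs above implement a same-page test: clearing the low
// page-offset bits of (a ^ b) leaves zero exactly when a and b lie on the
// same page. Illustrative numbers (taking kPageSizeBits == 20 for
// concreteness, as in the GetMarkBits() field diagram): if the last memento
// word is at ....123450 and allocation top is at ....122f18 on the same
// page, the XOR is 0x1b48 and the masked value is 0, so the code branches to
// top_check; if top sat one page higher, bit 20 of the XOR would survive the
// mask and execution would fall through to the page-boundary check.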
7018
7019
7020 Register GetRegisterThatIsNotOneOf(Register reg1,
7021 Register reg2,
7022 Register reg3,
7023 Register reg4,
7024 Register reg5,
7025 Register reg6) {
7026 RegList regs = 0;
7027 if (reg1.is_valid()) regs |= reg1.bit();
7028 if (reg2.is_valid()) regs |= reg2.bit();
7029 if (reg3.is_valid()) regs |= reg3.bit();
7030 if (reg4.is_valid()) regs |= reg4.bit();
7031 if (reg5.is_valid()) regs |= reg5.bit();
7032 if (reg6.is_valid()) regs |= reg6.bit();
7033
7034 const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
7035 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
7036 int code = config->GetAllocatableGeneralCode(i);
7037 Register candidate = Register::from_code(code);
7038 if (regs & candidate.bit()) continue;
7039 return candidate;
7040 }
7041 UNREACHABLE();
7042 return no_reg;
7043 }
7044
7045
7046 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
7047 Register object,
7048 Register scratch0,
7049 Register scratch1,
7050 Label* found) {
7051 DCHECK(!scratch1.is(scratch0));
7052 Factory* factory = isolate()->factory();
7053 Register current = scratch0;
7054 Label loop_again, end;
7055
7056 // 'current' starts at the object and is advanced to its prototype below.
7057 Move(current, object);
7058 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
7059 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
7060 Branch(&end, eq, current, Operand(factory->null_value()));
7061
7062 // Loop based on the map going up the prototype chain.
7063 bind(&loop_again);
7064 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
7065 lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
7066 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
7067 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
7068 Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
7069 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
7070 DecodeField<Map::ElementsKindBits>(scratch1);
7071 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
7072 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
7073 Branch(&loop_again, ne, current, Operand(factory->null_value()));
7074
7075 bind(&end);
7076 }
7077
7078
7079 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
7080 Register reg5, Register reg6, Register reg7, Register reg8,
7081 Register reg9, Register reg10) {
7082 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
7083 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
7084 reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
7085 reg10.is_valid();
7086
7087 RegList regs = 0;
7088 if (reg1.is_valid()) regs |= reg1.bit();
7089 if (reg2.is_valid()) regs |= reg2.bit();
7090 if (reg3.is_valid()) regs |= reg3.bit();
7091 if (reg4.is_valid()) regs |= reg4.bit();
7092 if (reg5.is_valid()) regs |= reg5.bit();
7093 if (reg6.is_valid()) regs |= reg6.bit();
7094 if (reg7.is_valid()) regs |= reg7.bit();
7095 if (reg8.is_valid()) regs |= reg8.bit();
7096 if (reg9.is_valid()) regs |= reg9.bit();
7097 if (reg10.is_valid()) regs |= reg10.bit();
7098 int n_of_non_aliasing_regs = NumRegs(regs);
7099
7100 return n_of_valid_regs != n_of_non_aliasing_regs;
7101 }
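// Example: AreAliased(a0, a1, a2, a0) counts four valid registers, but
// NumRegs(regs) is only 3 because a0's bit is set just once, so the result
// is true; AreAliased(a0, a1, a2, a3) yields false.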
7102
7103
7104 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
7105 FlushICache flush_cache)
7106 : address_(address),
7107 size_(instructions * Assembler::kInstrSize),
7108 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
7109 flush_cache_(flush_cache) {
7110 // Create a new macro assembler pointing to the address of the code to patch.
7111 // The size is adjusted with kGap in order for the assembler to generate size
7112 // bytes of instructions without failing due to buffer size constraints.
7113 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
7114 }
7115
7116
7117 CodePatcher::~CodePatcher() {
7118 // Indicate that code has changed.
7119 if (flush_cache_ == FLUSH) {
7120 Assembler::FlushICache(masm_.isolate(), address_, size_);
7121 }
7122 // Check that the code was patched as expected.
7123 DCHECK(masm_.pc_ == address_ + size_);
7124 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
7125 }
7126
7127
7128 void CodePatcher::Emit(Instr instr) {
7129 masm()->emit(instr);
7130 }
7131
7132
7133 void CodePatcher::Emit(Address addr) {
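// Reinterpreting a 64-bit Address as a single 32-bit Instr is not possible
// on mips64, which is presumably why the emit call below is disabled and
// this method is effectively a no-op here.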
7134 // masm()->emit(reinterpret_cast<Instr>(addr));
7135 }
7136
7137
7138 void CodePatcher::ChangeBranchCondition(Instr current_instr,
7139 uint32_t new_opcode) {
7140 current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
7141 masm_.emit(current_instr);
7142 }
7143
7144
7145 void MacroAssembler::TruncatingDiv(Register result,
7146 Register dividend,
7147 int32_t divisor) {
7148 DCHECK(!dividend.is(result));
7149 DCHECK(!dividend.is(at));
7150 DCHECK(!result.is(at));
7151 base::MagicNumbersForDivision<uint32_t> mag =
7152 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
7153 li(at, Operand(static_cast<int32_t>(mag.multiplier)));
7154 Mulh(result, dividend, Operand(at));
7155 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
7156 if (divisor > 0 && neg) {
7157 Addu(result, result, Operand(dividend));
7158 }
7159 if (divisor < 0 && !neg && mag.multiplier > 0) {
7160 Subu(result, result, Operand(dividend));
7161 }
7162 if (mag.shift > 0) sra(result, result, mag.shift);
7163 srl(at, dividend, 31);
7164 Addu(result, result, Operand(at));
7165 }
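// Worked example of the sequence above (multiplier/shift values follow the
// standard signed magic-number construction, cf. Hacker's Delight, and are
// shown for illustration): for divisor == 3, SignedDivisionByConstant yields
// multiplier 0x55555556 and shift 0; the multiplier's top bit is clear, so
// neither correction Addu/Subu is emitted. Then:
//   dividend ==  7: Mulh gives the high 32 bits of 7 * 0x55555556 == 2,
//                   sra is skipped (shift == 0), the sign bit of 7 is 0,
//                   so result == 2 == trunc(7 / 3).
//   dividend == -7: Mulh gives -3, the sign bit of -7 is 1,
//                   so result == -3 + 1 == -2 == trunc(-7 / 3).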
7166
7167
7168 } // namespace internal
7169 } // namespace v8
7170
7171 #endif // V8_TARGET_ARCH_MIPS64
7172