1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6
7 #if V8_TARGET_ARCH_MIPS64
8
9 #include "src/base/division-by-constant.h"
10 #include "src/bootstrapper.h"
11 #include "src/codegen.h"
12 #include "src/debug/debug.h"
13 #include "src/mips64/macro-assembler-mips64.h"
14 #include "src/register-configuration.h"
15 #include "src/runtime/runtime.h"
16
17 namespace v8 {
18 namespace internal {
19
20 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
21 CodeObjectRequired create_code_object)
22 : Assembler(arg_isolate, buffer, size),
23 generating_stub_(false),
24 has_frame_(false),
25 has_double_zero_reg_set_(false) {
26 if (create_code_object == CodeObjectRequired::kYes) {
27 code_object_ =
28 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
29 }
30 }
31
32
33 void MacroAssembler::Load(Register dst,
34 const MemOperand& src,
35 Representation r) {
36 DCHECK(!r.IsDouble());
37 if (r.IsInteger8()) {
38 lb(dst, src);
39 } else if (r.IsUInteger8()) {
40 lbu(dst, src);
41 } else if (r.IsInteger16()) {
42 lh(dst, src);
43 } else if (r.IsUInteger16()) {
44 lhu(dst, src);
45 } else if (r.IsInteger32()) {
46 lw(dst, src);
47 } else {
48 ld(dst, src);
49 }
50 }
51
52
53 void MacroAssembler::Store(Register src,
54 const MemOperand& dst,
55 Representation r) {
56 DCHECK(!r.IsDouble());
57 if (r.IsInteger8() || r.IsUInteger8()) {
58 sb(src, dst);
59 } else if (r.IsInteger16() || r.IsUInteger16()) {
60 sh(src, dst);
61 } else if (r.IsInteger32()) {
62 sw(src, dst);
63 } else {
64 if (r.IsHeapObject()) {
65 AssertNotSmi(src);
66 } else if (r.IsSmi()) {
67 AssertSmi(src);
68 }
69 sd(src, dst);
70 }
71 }
72
73
74 void MacroAssembler::LoadRoot(Register destination,
75 Heap::RootListIndex index) {
76 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
77 }
78
79
80 void MacroAssembler::LoadRoot(Register destination,
81 Heap::RootListIndex index,
82 Condition cond,
83 Register src1, const Operand& src2) {
84 Branch(2, NegateCondition(cond), src1, src2);
85 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
86 }
87
88
89 void MacroAssembler::StoreRoot(Register source,
90 Heap::RootListIndex index) {
91 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
92 sd(source, MemOperand(s6, index << kPointerSizeLog2));
93 }
94
95
96 void MacroAssembler::StoreRoot(Register source,
97 Heap::RootListIndex index,
98 Condition cond,
99 Register src1, const Operand& src2) {
100 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
101 Branch(2, NegateCondition(cond), src1, src2);
102 sd(source, MemOperand(s6, index << kPointerSizeLog2));
103 }
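
// In the conditional LoadRoot/StoreRoot variants above, the short forward
// Branch on the negated condition skips over the following root load/store,
// so the slot at s6 + index * kPointerSize is only accessed when 'cond'
// holds for src1 vs. src2.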
104
105
106 // Push and pop all registers that can hold pointers.
107 void MacroAssembler::PushSafepointRegisters() {
108 // Safepoints expect a block of kNumSafepointRegisters values on the
109 // stack, so adjust the stack for unsaved registers.
110 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
111 DCHECK(num_unsaved >= 0);
112 if (num_unsaved > 0) {
113 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
114 }
115 MultiPush(kSafepointSavedRegisters);
116 }
117
118
119 void MacroAssembler::PopSafepointRegisters() {
120 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
121 MultiPop(kSafepointSavedRegisters);
122 if (num_unsaved > 0) {
123 Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
124 }
125 }
126
127
128 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
129 sd(src, SafepointRegisterSlot(dst));
130 }
131
132
133 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
134 ld(dst, SafepointRegisterSlot(src));
135 }
136
137
138 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
139 // The registers are pushed starting with the highest encoding,
140 // which means that lowest encodings are closest to the stack pointer.
141 return kSafepointRegisterStackIndexMap[reg_code];
142 }
143
144
145 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
146 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
147 }
148
149
150 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
151 UNIMPLEMENTED_MIPS();
152 // General purpose registers are pushed last on the stack.
153 int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
154 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
155 return MemOperand(sp, doubles_size + register_offset);
156 }
157
158
159 void MacroAssembler::InNewSpace(Register object,
160 Register scratch,
161 Condition cc,
162 Label* branch) {
163 DCHECK(cc == eq || cc == ne);
164 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
165 Branch(branch, cc, scratch,
166 Operand(ExternalReference::new_space_start(isolate())));
167 }
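
// InNewSpace masks the object address with new_space_mask and compares the
// result against new_space_start: with cc == eq the branch above is taken
// when the object IS in new space, with cc == ne when it is not.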
168
169
170 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
171 // The register 'object' contains a heap object pointer. The heap object
172 // tag is shifted away.
173 void MacroAssembler::RecordWriteField(
174 Register object,
175 int offset,
176 Register value,
177 Register dst,
178 RAStatus ra_status,
179 SaveFPRegsMode save_fp,
180 RememberedSetAction remembered_set_action,
181 SmiCheck smi_check,
182 PointersToHereCheck pointers_to_here_check_for_value) {
183 DCHECK(!AreAliased(value, dst, t8, object));
184 // First, check if a write barrier is even needed. The tests below
185 // catch stores of Smis.
186 Label done;
187
188 // Skip barrier if writing a smi.
189 if (smi_check == INLINE_SMI_CHECK) {
190 JumpIfSmi(value, &done);
191 }
192
193 // Although the object register is tagged, the offset is relative to the start
194 // of the object, so the offset must be a multiple of kPointerSize.
195 DCHECK(IsAligned(offset, kPointerSize));
196
197 Daddu(dst, object, Operand(offset - kHeapObjectTag));
198 if (emit_debug_code()) {
199 Label ok;
200 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
201 Branch(&ok, eq, t8, Operand(zero_reg));
202 stop("Unaligned cell in write barrier");
203 bind(&ok);
204 }
205
206 RecordWrite(object,
207 dst,
208 value,
209 ra_status,
210 save_fp,
211 remembered_set_action,
212 OMIT_SMI_CHECK,
213 pointers_to_here_check_for_value);
214
215 bind(&done);
216
217 // Clobber clobbered input registers when running with the debug-code flag
218 // turned on to provoke errors.
219 if (emit_debug_code()) {
220 li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
221 li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
222 }
223 }
224
225
226 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
227 void MacroAssembler::RecordWriteForMap(Register object,
228 Register map,
229 Register dst,
230 RAStatus ra_status,
231 SaveFPRegsMode fp_mode) {
232 if (emit_debug_code()) {
233 DCHECK(!dst.is(at));
234 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
235 Check(eq,
236 kWrongAddressOrValuePassedToRecordWrite,
237 dst,
238 Operand(isolate()->factory()->meta_map()));
239 }
240
241 if (!FLAG_incremental_marking) {
242 return;
243 }
244
245 if (emit_debug_code()) {
246 ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
247 Check(eq,
248 kWrongAddressOrValuePassedToRecordWrite,
249 map,
250 Operand(at));
251 }
252
253 Label done;
254
255 // A single check of the map's page's interesting flag suffices, since it is
256 // only set during incremental collection, and then it's also guaranteed that
257 // the from object's page's interesting flag is also set. This optimization
258 // relies on the fact that maps can never be in new space.
259 CheckPageFlag(map,
260 map, // Used as scratch.
261 MemoryChunk::kPointersToHereAreInterestingMask,
262 eq,
263 &done);
264
265 Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
266 if (emit_debug_code()) {
267 Label ok;
268 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
269 Branch(&ok, eq, at, Operand(zero_reg));
270 stop("Unaligned cell in write barrier");
271 bind(&ok);
272 }
273
274 // Record the actual write.
275 if (ra_status == kRAHasNotBeenSaved) {
276 push(ra);
277 }
278 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
279 fp_mode);
280 CallStub(&stub);
281 if (ra_status == kRAHasNotBeenSaved) {
282 pop(ra);
283 }
284
285 bind(&done);
286
287 // Count number of write barriers in generated code.
288 isolate()->counters()->write_barriers_static()->Increment();
289 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
290
291 // Clobber clobbered registers when running with the debug-code flag
292 // turned on to provoke errors.
293 if (emit_debug_code()) {
294 li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
295 li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
296 }
297 }
298
299
300 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
301 // The register 'object' contains a heap object pointer. The heap object
302 // tag is shifted away.
303 void MacroAssembler::RecordWrite(
304 Register object,
305 Register address,
306 Register value,
307 RAStatus ra_status,
308 SaveFPRegsMode fp_mode,
309 RememberedSetAction remembered_set_action,
310 SmiCheck smi_check,
311 PointersToHereCheck pointers_to_here_check_for_value) {
312 DCHECK(!AreAliased(object, address, value, t8));
313 DCHECK(!AreAliased(object, address, value, t9));
314
315 if (emit_debug_code()) {
316 ld(at, MemOperand(address));
317 Assert(
318 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
319 }
320
321 if (remembered_set_action == OMIT_REMEMBERED_SET &&
322 !FLAG_incremental_marking) {
323 return;
324 }
325
326 // First, check if a write barrier is even needed. The tests below
327 // catch stores of smis and stores into the young generation.
328 Label done;
329
330 if (smi_check == INLINE_SMI_CHECK) {
331 DCHECK_EQ(0, kSmiTag);
332 JumpIfSmi(value, &done);
333 }
334
335 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
336 CheckPageFlag(value,
337 value, // Used as scratch.
338 MemoryChunk::kPointersToHereAreInterestingMask,
339 eq,
340 &done);
341 }
342 CheckPageFlag(object,
343 value, // Used as scratch.
344 MemoryChunk::kPointersFromHereAreInterestingMask,
345 eq,
346 &done);
347
348 // Record the actual write.
349 if (ra_status == kRAHasNotBeenSaved) {
350 push(ra);
351 }
352 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
353 fp_mode);
354 CallStub(&stub);
355 if (ra_status == kRAHasNotBeenSaved) {
356 pop(ra);
357 }
358
359 bind(&done);
360
361 // Count number of write barriers in generated code.
362 isolate()->counters()->write_barriers_static()->Increment();
363 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
364 value);
365
366 // Clobber clobbered registers when running with the debug-code flag
367 // turned on to provoke errors.
368 if (emit_debug_code()) {
369 li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
370 li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
371 }
372 }
373
374
375 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
376 Register address,
377 Register scratch,
378 SaveFPRegsMode fp_mode,
379 RememberedSetFinalAction and_then) {
380 Label done;
381 if (emit_debug_code()) {
382 Label ok;
383 JumpIfNotInNewSpace(object, scratch, &ok);
384 stop("Remembered set pointer is in new space");
385 bind(&ok);
386 }
387 // Load store buffer top.
388 ExternalReference store_buffer =
389 ExternalReference::store_buffer_top(isolate());
390 li(t8, Operand(store_buffer));
391 ld(scratch, MemOperand(t8));
392 // Store pointer to buffer and increment buffer top.
393 sd(address, MemOperand(scratch));
394 Daddu(scratch, scratch, kPointerSize);
395 // Write back new top of buffer.
396 sd(scratch, MemOperand(t8));
397 // Check for the end of the buffer.
398 // The overflow stub is called below when the end has been reached.
399 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
400 DCHECK(!scratch.is(t8));
401 if (and_then == kFallThroughAtEnd) {
402 Branch(&done, eq, t8, Operand(zero_reg));
403 } else {
404 DCHECK(and_then == kReturnAtEnd);
405 Ret(eq, t8, Operand(zero_reg));
406 }
407 push(ra);
408 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
409 CallStub(&store_buffer_overflow);
410 pop(ra);
411 bind(&done);
412 if (and_then == kReturnAtEnd) {
413 Ret();
414 }
415 }
416
417
418 // -----------------------------------------------------------------------------
419 // Allocation support.
420
421
422 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
423 Register scratch,
424 Label* miss) {
425 Label same_contexts;
426
427 DCHECK(!holder_reg.is(scratch));
428 DCHECK(!holder_reg.is(at));
429 DCHECK(!scratch.is(at));
430
431 // Load current lexical context from the stack frame.
432 ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
433 // In debug mode, make sure the lexical context is set.
434 #ifdef DEBUG
435 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
436 scratch, Operand(zero_reg));
437 #endif
438
439 // Load the native context of the current context.
440 ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
441
442 // Check the context is a native context.
443 if (emit_debug_code()) {
444 push(holder_reg); // Temporarily save holder on the stack.
445 // Read the first word and compare to the native_context_map.
446 ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
447 LoadRoot(at, Heap::kNativeContextMapRootIndex);
448 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
449 holder_reg, Operand(at));
450 pop(holder_reg); // Restore holder.
451 }
452
453 // Check if both contexts are the same.
454 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
455 Branch(&same_contexts, eq, scratch, Operand(at));
456
457 // Check the context is a native context.
458 if (emit_debug_code()) {
459 push(holder_reg); // Temporarily save holder on the stack.
460 mov(holder_reg, at); // Move at to its holding place.
461 LoadRoot(at, Heap::kNullValueRootIndex);
462 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
463 holder_reg, Operand(at));
464
465 ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
466 LoadRoot(at, Heap::kNativeContextMapRootIndex);
467 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
468 holder_reg, Operand(at));
469 // Restoring at is not needed; at is reloaded below.
470 pop(holder_reg); // Restore holder.
471 // Restore at to holder's context.
472 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
473 }
474
475 // Check that the security token in the calling global object is
476 // compatible with the security token in the receiving global
477 // object.
478 int token_offset = Context::kHeaderSize +
479 Context::SECURITY_TOKEN_INDEX * kPointerSize;
480
481 ld(scratch, FieldMemOperand(scratch, token_offset));
482 ld(at, FieldMemOperand(at, token_offset));
483 Branch(miss, ne, scratch, Operand(at));
484
485 bind(&same_contexts);
486 }
487
488
489 // Compute the hash code from the untagged key. This must be kept in sync with
490 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
491 // code-stubs-hydrogen.cc.
492 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
493 // First of all we assign the hash seed to scratch.
494 LoadRoot(scratch, Heap::kHashSeedRootIndex);
495 SmiUntag(scratch);
496
497 // Xor original key with a seed.
498 xor_(reg0, reg0, scratch);
499
500 // Compute the hash code from the untagged key. This must be kept in sync
501 // with ComputeIntegerHash in utils.h.
502 //
503 // hash = ~hash + (hash << 15);
504 // The algorithm uses 32-bit integer values.
505 nor(scratch, reg0, zero_reg);
506 sll(at, reg0, 15);
507 addu(reg0, scratch, at);
508
509 // hash = hash ^ (hash >> 12);
510 srl(at, reg0, 12);
511 xor_(reg0, reg0, at);
512
513 // hash = hash + (hash << 2);
514 sll(at, reg0, 2);
515 addu(reg0, reg0, at);
516
517 // hash = hash ^ (hash >> 4);
518 srl(at, reg0, 4);
519 xor_(reg0, reg0, at);
520
521 // hash = hash * 2057;
522 sll(scratch, reg0, 11);
523 sll(at, reg0, 3);
524 addu(reg0, reg0, at);
525 addu(reg0, reg0, scratch);
526
527 // hash = hash ^ (hash >> 16);
528 srl(at, reg0, 16);
529 xor_(reg0, reg0, at);
530 And(reg0, reg0, Operand(0x3fffffff));
531 }
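
// For reference, the sequence above corresponds to this C-level sketch
// (mirroring ComputeIntegerHash in utils.h; 'key' stands for reg0 and 'seed'
// for the untagged hash seed -- a sketch, not emitted code):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11)
//   hash = hash ^ (hash >> 16);
//   return hash & 0x3fffffff;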
532
533
534 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
535 Register elements,
536 Register key,
537 Register result,
538 Register reg0,
539 Register reg1,
540 Register reg2) {
541 // Register use:
542 //
543 // elements - holds the slow-case elements of the receiver on entry.
544 // Unchanged unless 'result' is the same register.
545 //
546 // key - holds the smi key on entry.
547 // Unchanged unless 'result' is the same register.
548 //
549 //
550 // result - holds the result on exit if the load succeeded.
551 // Allowed to be the same as 'elements' or 'key'.
552 // Unchanged on bailout so 'key' or 'result' can be used
553 // in further computation.
554 //
555 // Scratch registers:
556 //
557 // reg0 - holds the untagged key on entry and holds the hash once computed.
558 //
559 // reg1 - Used to hold the capacity mask of the dictionary.
560 //
561 // reg2 - Used for the index into the dictionary.
562 // at - Temporary (avoid MacroAssembler instructions also using 'at').
563 Label done;
564
565 GetNumberHash(reg0, reg1);
566
567 // Compute the capacity mask.
568 ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
569 SmiUntag(reg1, reg1);
570 Dsubu(reg1, reg1, Operand(1));
571
572 // Generate an unrolled loop that performs a few probes before giving up.
573 for (int i = 0; i < kNumberDictionaryProbes; i++) {
574 // Use reg2 for index calculations and keep the hash intact in reg0.
575 mov(reg2, reg0);
576 // Compute the masked index: (hash + i + i * i) & mask.
577 if (i > 0) {
578 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
579 }
580 and_(reg2, reg2, reg1);
581
582 // Scale the index by multiplying by the element size.
583 DCHECK(SeededNumberDictionary::kEntrySize == 3);
584 dsll(at, reg2, 1); // 2x.
585 daddu(reg2, reg2, at); // reg2 = reg2 * 3.
586
587 // Check if the key is identical to the name.
588 dsll(at, reg2, kPointerSizeLog2);
589 daddu(reg2, elements, at);
590
591 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
592 if (i != kNumberDictionaryProbes - 1) {
593 Branch(&done, eq, key, Operand(at));
594 } else {
595 Branch(miss, ne, key, Operand(at));
596 }
597 }
598
599 bind(&done);
600 // Check that the value is a field property.
601 // reg2: elements + (index * kPointerSize).
602 const int kDetailsOffset =
603 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
604 ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
605 DCHECK_EQ(DATA, 0);
606 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
607 Branch(miss, ne, at, Operand(zero_reg));
608
609 // Get the value at the masked, scaled index and return.
610 const int kValueOffset =
611 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
612 ld(result, FieldMemOperand(reg2, kValueOffset));
613 }
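
// Dictionary layout assumed by the probing loop above: each entry occupies
// kEntrySize == 3 pointers (key, value, details) starting at
// kElementsStartOffset, so the probe index ((hash + i + i*i) & mask) is
// scaled by 3; the value then lives one pointer and the details word two
// pointers past the matched key slot.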
614
615
616 // ---------------------------------------------------------------------------
617 // Instruction macros.
618
619 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
620 if (rt.is_reg()) {
621 addu(rd, rs, rt.rm());
622 } else {
623 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
624 addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
625 } else {
626 // li handles the relocation.
627 DCHECK(!rs.is(at));
628 li(at, rt);
629 addu(rd, rs, at);
630 }
631 }
632 }
633
634
635 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
636 if (rt.is_reg()) {
637 daddu(rd, rs, rt.rm());
638 } else {
639 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
640 daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
641 } else {
642 // li handles the relocation.
643 DCHECK(!rs.is(at));
644 li(at, rt);
645 daddu(rd, rs, at);
646 }
647 }
648 }
649
650
651 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
652 if (rt.is_reg()) {
653 subu(rd, rs, rt.rm());
654 } else {
655 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
656 addiu(rd, rs, static_cast<int32_t>(
657 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
658 } else {
659 // li handles the relocation.
660 DCHECK(!rs.is(at));
661 li(at, rt);
662 subu(rd, rs, at);
663 }
664 }
665 }
666
667
668 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
669 if (rt.is_reg()) {
670 dsubu(rd, rs, rt.rm());
671 } else {
672 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
673 daddiu(rd, rs,
674 static_cast<int32_t>(
675 -rt.imm64_)); // No dsubiu instr, use daddiu(x, y, -imm).
676 } else {
677 // li handles the relocation.
678 DCHECK(!rs.is(at));
679 li(at, rt);
680 dsubu(rd, rs, at);
681 }
682 }
683 }
684
685
686 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
687 if (rt.is_reg()) {
688 mul(rd, rs, rt.rm());
689 } else {
690 // li handles the relocation.
691 DCHECK(!rs.is(at));
692 li(at, rt);
693 mul(rd, rs, at);
694 }
695 }
696
697
698 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
699 if (rt.is_reg()) {
700 if (kArchVariant != kMips64r6) {
701 mult(rs, rt.rm());
702 mfhi(rd);
703 } else {
704 muh(rd, rs, rt.rm());
705 }
706 } else {
707 // li handles the relocation.
708 DCHECK(!rs.is(at));
709 li(at, rt);
710 if (kArchVariant != kMips64r6) {
711 mult(rs, at);
712 mfhi(rd);
713 } else {
714 muh(rd, rs, at);
715 }
716 }
717 }
718
719
720 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
721 if (rt.is_reg()) {
722 if (kArchVariant != kMips64r6) {
723 multu(rs, rt.rm());
724 mfhi(rd);
725 } else {
726 muhu(rd, rs, rt.rm());
727 }
728 } else {
729 // li handles the relocation.
730 DCHECK(!rs.is(at));
731 li(at, rt);
732 if (kArchVariant != kMips64r6) {
733 multu(rs, at);
734 mfhi(rd);
735 } else {
736 muhu(rd, rs, at);
737 }
738 }
739 }
740
741
742 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
743 if (rt.is_reg()) {
744 if (kArchVariant == kMips64r6) {
745 dmul(rd, rs, rt.rm());
746 } else {
747 dmult(rs, rt.rm());
748 mflo(rd);
749 }
750 } else {
751 // li handles the relocation.
752 DCHECK(!rs.is(at));
753 li(at, rt);
754 if (kArchVariant == kMips64r6) {
755 dmul(rd, rs, at);
756 } else {
757 dmult(rs, at);
758 mflo(rd);
759 }
760 }
761 }
762
763
764 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
765 if (rt.is_reg()) {
766 if (kArchVariant == kMips64r6) {
767 dmuh(rd, rs, rt.rm());
768 } else {
769 dmult(rs, rt.rm());
770 mfhi(rd);
771 }
772 } else {
773 // li handles the relocation.
774 DCHECK(!rs.is(at));
775 li(at, rt);
776 if (kArchVariant == kMips64r6) {
777 dmuh(rd, rs, at);
778 } else {
779 dmult(rs, at);
780 mfhi(rd);
781 }
782 }
783 }
784
785
786 void MacroAssembler::Mult(Register rs, const Operand& rt) {
787 if (rt.is_reg()) {
788 mult(rs, rt.rm());
789 } else {
790 // li handles the relocation.
791 DCHECK(!rs.is(at));
792 li(at, rt);
793 mult(rs, at);
794 }
795 }
796
797
798 void MacroAssembler::Dmult(Register rs, const Operand& rt) {
799 if (rt.is_reg()) {
800 dmult(rs, rt.rm());
801 } else {
802 // li handles the relocation.
803 DCHECK(!rs.is(at));
804 li(at, rt);
805 dmult(rs, at);
806 }
807 }
808
809
810 void MacroAssembler::Multu(Register rs, const Operand& rt) {
811 if (rt.is_reg()) {
812 multu(rs, rt.rm());
813 } else {
814 // li handles the relocation.
815 DCHECK(!rs.is(at));
816 li(at, rt);
817 multu(rs, at);
818 }
819 }
820
821
822 void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
823 if (rt.is_reg()) {
824 dmultu(rs, rt.rm());
825 } else {
826 // li handles the relocation.
827 DCHECK(!rs.is(at));
828 li(at, rt);
829 dmultu(rs, at);
830 }
831 }
832
833
834 void MacroAssembler::Div(Register rs, const Operand& rt) {
835 if (rt.is_reg()) {
836 div(rs, rt.rm());
837 } else {
838 // li handles the relocation.
839 DCHECK(!rs.is(at));
840 li(at, rt);
841 div(rs, at);
842 }
843 }
844
845
846 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
847 if (rt.is_reg()) {
848 if (kArchVariant != kMips64r6) {
849 div(rs, rt.rm());
850 mflo(res);
851 } else {
852 div(res, rs, rt.rm());
853 }
854 } else {
855 // li handles the relocation.
856 DCHECK(!rs.is(at));
857 li(at, rt);
858 if (kArchVariant != kMips64r6) {
859 div(rs, at);
860 mflo(res);
861 } else {
862 div(res, rs, at);
863 }
864 }
865 }
866
867
868 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
869 if (rt.is_reg()) {
870 if (kArchVariant != kMips64r6) {
871 div(rs, rt.rm());
872 mfhi(rd);
873 } else {
874 mod(rd, rs, rt.rm());
875 }
876 } else {
877 // li handles the relocation.
878 DCHECK(!rs.is(at));
879 li(at, rt);
880 if (kArchVariant != kMips64r6) {
881 div(rs, at);
882 mfhi(rd);
883 } else {
884 mod(rd, rs, at);
885 }
886 }
887 }
888
889
890 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
891 if (rt.is_reg()) {
892 if (kArchVariant != kMips64r6) {
893 divu(rs, rt.rm());
894 mfhi(rd);
895 } else {
896 modu(rd, rs, rt.rm());
897 }
898 } else {
899 // li handles the relocation.
900 DCHECK(!rs.is(at));
901 li(at, rt);
902 if (kArchVariant != kMips64r6) {
903 divu(rs, at);
904 mfhi(rd);
905 } else {
906 modu(rd, rs, at);
907 }
908 }
909 }
910
911
912 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
913 if (rt.is_reg()) {
914 ddiv(rs, rt.rm());
915 } else {
916 // li handles the relocation.
917 DCHECK(!rs.is(at));
918 li(at, rt);
919 ddiv(rs, at);
920 }
921 }
922
923
924 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
925 if (kArchVariant != kMips64r6) {
926 if (rt.is_reg()) {
927 ddiv(rs, rt.rm());
928 mflo(rd);
929 } else {
930 // li handles the relocation.
931 DCHECK(!rs.is(at));
932 li(at, rt);
933 ddiv(rs, at);
934 mflo(rd);
935 }
936 } else {
937 if (rt.is_reg()) {
938 ddiv(rd, rs, rt.rm());
939 } else {
940 // li handles the relocation.
941 DCHECK(!rs.is(at));
942 li(at, rt);
943 ddiv(rd, rs, at);
944 }
945 }
946 }
947
948
949 void MacroAssembler::Divu(Register rs, const Operand& rt) {
950 if (rt.is_reg()) {
951 divu(rs, rt.rm());
952 } else {
953 // li handles the relocation.
954 DCHECK(!rs.is(at));
955 li(at, rt);
956 divu(rs, at);
957 }
958 }
959
960
961 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
962 if (rt.is_reg()) {
963 if (kArchVariant != kMips64r6) {
964 divu(rs, rt.rm());
965 mflo(res);
966 } else {
967 divu(res, rs, rt.rm());
968 }
969 } else {
970 // li handles the relocation.
971 DCHECK(!rs.is(at));
972 li(at, rt);
973 if (kArchVariant != kMips64r6) {
974 divu(rs, at);
975 mflo(res);
976 } else {
977 divu(res, rs, at);
978 }
979 }
980 }
981
982
983 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
984 if (rt.is_reg()) {
985 ddivu(rs, rt.rm());
986 } else {
987 // li handles the relocation.
988 DCHECK(!rs.is(at));
989 li(at, rt);
990 ddivu(rs, at);
991 }
992 }
993
994
995 void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
996 if (rt.is_reg()) {
997 if (kArchVariant != kMips64r6) {
998 ddivu(rs, rt.rm());
999 mflo(res);
1000 } else {
1001 ddivu(res, rs, rt.rm());
1002 }
1003 } else {
1004 // li handles the relocation.
1005 DCHECK(!rs.is(at));
1006 li(at, rt);
1007 if (kArchVariant != kMips64r6) {
1008 ddivu(rs, at);
1009 mflo(res);
1010 } else {
1011 ddivu(res, rs, at);
1012 }
1013 }
1014 }
1015
1016
1017 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
1018 if (kArchVariant != kMips64r6) {
1019 if (rt.is_reg()) {
1020 ddiv(rs, rt.rm());
1021 mfhi(rd);
1022 } else {
1023 // li handles the relocation.
1024 DCHECK(!rs.is(at));
1025 li(at, rt);
1026 ddiv(rs, at);
1027 mfhi(rd);
1028 }
1029 } else {
1030 if (rt.is_reg()) {
1031 dmod(rd, rs, rt.rm());
1032 } else {
1033 // li handles the relocation.
1034 DCHECK(!rs.is(at));
1035 li(at, rt);
1036 dmod(rd, rs, at);
1037 }
1038 }
1039 }
1040
1041
1042 void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
1043 if (kArchVariant != kMips64r6) {
1044 if (rt.is_reg()) {
1045 ddivu(rs, rt.rm());
1046 mfhi(rd);
1047 } else {
1048 // li handles the relocation.
1049 DCHECK(!rs.is(at));
1050 li(at, rt);
1051 ddivu(rs, at);
1052 mfhi(rd);
1053 }
1054 } else {
1055 if (rt.is_reg()) {
1056 dmodu(rd, rs, rt.rm());
1057 } else {
1058 // li handles the relocation.
1059 DCHECK(!rs.is(at));
1060 li(at, rt);
1061 dmodu(rd, rs, at);
1062 }
1063 }
1064 }
1065
1066
1067 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1068 if (rt.is_reg()) {
1069 and_(rd, rs, rt.rm());
1070 } else {
1071 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1072 andi(rd, rs, static_cast<int32_t>(rt.imm64_));
1073 } else {
1074 // li handles the relocation.
1075 DCHECK(!rs.is(at));
1076 li(at, rt);
1077 and_(rd, rs, at);
1078 }
1079 }
1080 }
1081
1082
1083 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1084 if (rt.is_reg()) {
1085 or_(rd, rs, rt.rm());
1086 } else {
1087 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1088 ori(rd, rs, static_cast<int32_t>(rt.imm64_));
1089 } else {
1090 // li handles the relocation.
1091 DCHECK(!rs.is(at));
1092 li(at, rt);
1093 or_(rd, rs, at);
1094 }
1095 }
1096 }
1097
1098
1099 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1100 if (rt.is_reg()) {
1101 xor_(rd, rs, rt.rm());
1102 } else {
1103 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1104 xori(rd, rs, static_cast<int32_t>(rt.imm64_));
1105 } else {
1106 // li handles the relocation.
1107 DCHECK(!rs.is(at));
1108 li(at, rt);
1109 xor_(rd, rs, at);
1110 }
1111 }
1112 }
1113
1114
1115 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1116 if (rt.is_reg()) {
1117 nor(rd, rs, rt.rm());
1118 } else {
1119 // li handles the relocation.
1120 DCHECK(!rs.is(at));
1121 li(at, rt);
1122 nor(rd, rs, at);
1123 }
1124 }
1125
1126
1127 void MacroAssembler::Neg(Register rs, const Operand& rt) {
1128 DCHECK(rt.is_reg());
1129 DCHECK(!at.is(rs));
1130 DCHECK(!at.is(rt.rm()));
1131 li(at, -1);
1132 xor_(rs, rt.rm(), at);
1133 }
1134
1135
1136 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1137 if (rt.is_reg()) {
1138 slt(rd, rs, rt.rm());
1139 } else {
1140 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1141 slti(rd, rs, static_cast<int32_t>(rt.imm64_));
1142 } else {
1143 // li handles the relocation.
1144 DCHECK(!rs.is(at));
1145 li(at, rt);
1146 slt(rd, rs, at);
1147 }
1148 }
1149 }
1150
1151
1152 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1153 if (rt.is_reg()) {
1154 sltu(rd, rs, rt.rm());
1155 } else {
1156 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1157 sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
1158 } else {
1159 // li handles the relocation.
1160 DCHECK(!rs.is(at));
1161 li(at, rt);
1162 sltu(rd, rs, at);
1163 }
1164 }
1165 }
1166
1167
1168 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1169 if (rt.is_reg()) {
1170 rotrv(rd, rs, rt.rm());
1171 } else {
1172 rotr(rd, rs, rt.imm64_);
1173 }
1174 }
1175
1176
1177 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1178 if (rt.is_reg()) {
1179 drotrv(rd, rs, rt.rm());
1180 } else {
1181 drotr(rd, rs, rt.imm64_);
1182 }
1183 }
1184
1185
1186 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1187 pref(hint, rs);
1188 }
1189
1190
1191 void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1192 Register scratch) {
1193 if (kArchVariant == kMips64r6 && sa <= 4) {
1194 lsa(rd, rt, rs, sa);
1195 } else {
1196 Register tmp = rd.is(rt) ? scratch : rd;
1197 DCHECK(!tmp.is(rt));
1198 sll(tmp, rs, sa);
1199 Addu(rd, rt, tmp);
1200 }
1201 }
1202
1203
1204 void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
1205 Register scratch) {
1206 if (kArchVariant == kMips64r6 && sa <= 4) {
1207 dlsa(rd, rt, rs, sa);
1208 } else {
1209 Register tmp = rd.is(rt) ? scratch : rd;
1210 DCHECK(!tmp.is(rt));
1211 dsll(tmp, rs, sa);
1212 Daddu(rd, rt, tmp);
1213 }
1214 }
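
// For example (illustrative registers only), Lsa(v0, a0, a1, 2) yields
// v0 = a0 + (a1 << 2), either via the r6 lsa instruction (sa <= 4) or via
// the sll/Addu fallback above; Dlsa is the 64-bit analogue using dsll/Daddu.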
1215
1216
1217 // ------------Pseudo-instructions-------------
1218
1219 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1220 lwr(rd, rs);
1221 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1222 }
1223
1224
1225 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1226 swr(rd, rs);
1227 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1228 }
1229
1230
1231 // Do 64-bit load from unaligned address. Note this only handles
1232 // the specific case of 32-bit aligned, but not 64-bit aligned.
1233 void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
1234 // The assert below fails if the offset from the start of the object IS aligned.
1235 // ONLY use with known misalignment, since there is a performance cost.
1236 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1237 if (kArchEndian == kLittle) {
1238 lwu(rd, rs);
1239 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1240 dsll32(scratch, scratch, 0);
1241 } else {
1242 lw(rd, rs);
1243 lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1244 dsll32(rd, rd, 0);
1245 }
1246 Daddu(rd, rd, scratch);
1247 }
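
// Sketch of the composition above, little-endian case: for a doubleword at
// offset o, rd = zero_extend(mem32[o]) + (mem32[o + 4] << 32), so only
// 32-bit alignment of o is required.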
1248
1249
1250 // Load a consecutive 32-bit word pair into a 64-bit register, putting the
1251 // first word in the low bits and the second word in the high bits.
1253 void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
1254 Register scratch) {
1255 lwu(rd, rs);
1256 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1257 dsll32(scratch, scratch, 0);
1258 Daddu(rd, rd, scratch);
1259 }
1260
1261
1262 // Do 64-bit store to unaligned address. Note this only handles
1263 // the specific case of 32-bit aligned, but not 64-bit aligned.
1264 void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
1265 // The assert below fails if the offset from the start of the object IS aligned.
1266 // ONLY use with known misalignment, since there is a performance cost.
1267 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1268 if (kArchEndian == kLittle) {
1269 sw(rd, rs);
1270 dsrl32(scratch, rd, 0);
1271 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1272 } else {
1273 sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1274 dsrl32(scratch, rd, 0);
1275 sw(scratch, rs);
1276 }
1277 }
1278
1279
1280 // Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
1281 void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
1282 Register scratch) {
1283 sw(rd, rs);
1284 dsrl32(scratch, rd, 0);
1285 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1286 }
1287
1288
1289 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1290 AllowDeferredHandleDereference smi_check;
1291 if (value->IsSmi()) {
1292 li(dst, Operand(value), mode);
1293 } else {
1294 DCHECK(value->IsHeapObject());
1295 if (isolate()->heap()->InNewSpace(*value)) {
1296 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1297 li(dst, Operand(cell));
1298 ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
1299 } else {
1300 li(dst, Operand(value));
1301 }
1302 }
1303 }
1304
1305
1306 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1307 DCHECK(!j.is_reg());
1308 BlockTrampolinePoolScope block_trampoline_pool(this);
1309 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1310 // Normal load of an immediate value which does not need Relocation Info.
1311 if (is_int32(j.imm64_)) {
1312 if (is_int16(j.imm64_)) {
1313 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1314 } else if (!(j.imm64_ & kHiMask)) {
1315 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1316 } else if (!(j.imm64_ & kImm16Mask)) {
1317 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1318 } else {
1319 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1320 ori(rd, rd, (j.imm64_ & kImm16Mask));
1321 }
1322 } else {
1323 if (is_int48(j.imm64_)) {
1324 if ((j.imm64_ >> 32) & kImm16Mask) {
1325 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1326 if ((j.imm64_ >> 16) & kImm16Mask) {
1327 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1328 }
1329 } else {
1330 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
1331 }
1332 dsll(rd, rd, 16);
1333 if (j.imm64_ & kImm16Mask) {
1334 ori(rd, rd, j.imm64_ & kImm16Mask);
1335 }
1336 } else {
1337 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1338 if ((j.imm64_ >> 32) & kImm16Mask) {
1339 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1340 }
1341 if ((j.imm64_ >> 16) & kImm16Mask) {
1342 dsll(rd, rd, 16);
1343 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1344 if (j.imm64_ & kImm16Mask) {
1345 dsll(rd, rd, 16);
1346 ori(rd, rd, j.imm64_ & kImm16Mask);
1347 } else {
1348 dsll(rd, rd, 16);
1349 }
1350 } else {
1351 if (j.imm64_ & kImm16Mask) {
1352 dsll32(rd, rd, 0);
1353 ori(rd, rd, j.imm64_ & kImm16Mask);
1354 } else {
1355 dsll32(rd, rd, 0);
1356 }
1357 }
1358 }
1359 }
1360 } else if (MustUseReg(j.rmode_)) {
1361 RecordRelocInfo(j.rmode_, j.imm64_);
1362 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1363 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1364 dsll(rd, rd, 16);
1365 ori(rd, rd, j.imm64_ & kImm16Mask);
1366 } else if (mode == ADDRESS_LOAD) {
1367 // We always need the same number of instructions as we may need to patch
1368 // this code to load another value which may need all 4 instructions.
1369 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1370 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1371 dsll(rd, rd, 16);
1372 ori(rd, rd, j.imm64_ & kImm16Mask);
1373 } else {
1374 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1375 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1376 dsll(rd, rd, 16);
1377 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1378 dsll(rd, rd, 16);
1379 ori(rd, rd, j.imm64_ & kImm16Mask);
1380 }
1381 }
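
// Note on the relocatable and ADDRESS_LOAD paths above: they always emit
// exactly four instructions -- lui with bits 47..32, ori with bits 31..16,
// dsll by 16, ori with bits 15..0 -- so the sequence has a fixed length and
// can later be patched in place with a different (sign-extended 48-bit)
// value.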
1382
1383
1384 void MacroAssembler::MultiPush(RegList regs) {
1385 int16_t num_to_push = NumberOfBitsSet(regs);
1386 int16_t stack_offset = num_to_push * kPointerSize;
1387
1388 Dsubu(sp, sp, Operand(stack_offset));
1389 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1390 if ((regs & (1 << i)) != 0) {
1391 stack_offset -= kPointerSize;
1392 sd(ToRegister(i), MemOperand(sp, stack_offset));
1393 }
1394 }
1395 }
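
// Illustrative use: MultiPush(ra.bit() | fp.bit()) reserves two words and,
// because registers are stored from the highest encoding downwards while the
// offset decreases, leaves fp at MemOperand(sp, 0) and ra at
// MemOperand(sp, kPointerSize); MultiPop with the same RegList restores both.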
1396
1397
1398 void MacroAssembler::MultiPushReversed(RegList regs) {
1399 int16_t num_to_push = NumberOfBitsSet(regs);
1400 int16_t stack_offset = num_to_push * kPointerSize;
1401
1402 Dsubu(sp, sp, Operand(stack_offset));
1403 for (int16_t i = 0; i < kNumRegisters; i++) {
1404 if ((regs & (1 << i)) != 0) {
1405 stack_offset -= kPointerSize;
1406 sd(ToRegister(i), MemOperand(sp, stack_offset));
1407 }
1408 }
1409 }
1410
1411
1412 void MacroAssembler::MultiPop(RegList regs) {
1413 int16_t stack_offset = 0;
1414
1415 for (int16_t i = 0; i < kNumRegisters; i++) {
1416 if ((regs & (1 << i)) != 0) {
1417 ld(ToRegister(i), MemOperand(sp, stack_offset));
1418 stack_offset += kPointerSize;
1419 }
1420 }
1421 daddiu(sp, sp, stack_offset);
1422 }
1423
1424
1425 void MacroAssembler::MultiPopReversed(RegList regs) {
1426 int16_t stack_offset = 0;
1427
1428 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1429 if ((regs & (1 << i)) != 0) {
1430 ld(ToRegister(i), MemOperand(sp, stack_offset));
1431 stack_offset += kPointerSize;
1432 }
1433 }
1434 daddiu(sp, sp, stack_offset);
1435 }
1436
1437
1438 void MacroAssembler::MultiPushFPU(RegList regs) {
1439 int16_t num_to_push = NumberOfBitsSet(regs);
1440 int16_t stack_offset = num_to_push * kDoubleSize;
1441
1442 Dsubu(sp, sp, Operand(stack_offset));
1443 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1444 if ((regs & (1 << i)) != 0) {
1445 stack_offset -= kDoubleSize;
1446 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1447 }
1448 }
1449 }
1450
1451
1452 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1453 int16_t num_to_push = NumberOfBitsSet(regs);
1454 int16_t stack_offset = num_to_push * kDoubleSize;
1455
1456 Dsubu(sp, sp, Operand(stack_offset));
1457 for (int16_t i = 0; i < kNumRegisters; i++) {
1458 if ((regs & (1 << i)) != 0) {
1459 stack_offset -= kDoubleSize;
1460 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1461 }
1462 }
1463 }
1464
1465
1466 void MacroAssembler::MultiPopFPU(RegList regs) {
1467 int16_t stack_offset = 0;
1468
1469 for (int16_t i = 0; i < kNumRegisters; i++) {
1470 if ((regs & (1 << i)) != 0) {
1471 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1472 stack_offset += kDoubleSize;
1473 }
1474 }
1475 daddiu(sp, sp, stack_offset);
1476 }
1477
1478
1479 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1480 int16_t stack_offset = 0;
1481
1482 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1483 if ((regs & (1 << i)) != 0) {
1484 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1485 stack_offset += kDoubleSize;
1486 }
1487 }
1488 daddiu(sp, sp, stack_offset);
1489 }
1490
1491
1492 void MacroAssembler::Ext(Register rt,
1493 Register rs,
1494 uint16_t pos,
1495 uint16_t size) {
1496 DCHECK(pos < 32);
1497 DCHECK(pos + size < 33);
1498 ext_(rt, rs, pos, size);
1499 }
1500
1501
1502 void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
1503 uint16_t size) {
1504 DCHECK(pos < 32);
1505 DCHECK(pos + size < 33);
1506 dext_(rt, rs, pos, size);
1507 }
1508
1509
1510 void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
1511 uint16_t size) {
1512 DCHECK(pos < 32);
1513 DCHECK(size <= 64);
1514 dextm(rt, rs, pos, size);
1515 }
1516
1517
1518 void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
1519 uint16_t size) {
1520 DCHECK(pos >= 32 && pos < 64);
1521 DCHECK(size < 33);
1522 dextu(rt, rs, pos, size);
1523 }
1524
1525
1526 void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
1527 uint16_t size) {
1528 DCHECK(pos < 32);
1529 DCHECK(pos + size <= 32);
1530 DCHECK(size != 0);
1531 dins_(rt, rs, pos, size);
1532 }
1533
1534
1535 void MacroAssembler::Ins(Register rt,
1536 Register rs,
1537 uint16_t pos,
1538 uint16_t size) {
1539 DCHECK(pos < 32);
1540 DCHECK(pos + size <= 32);
1541 DCHECK(size != 0);
1542 ins_(rt, rs, pos, size);
1543 }
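
// In the bit-field helpers above, Ext(rt, rs, pos, size) extracts the
// size-bit field of rs starting at bit pos into the low bits of rt, and
// Ins(rt, rs, pos, size) inserts the low size bits of rs into rt at bit pos,
// leaving the remaining bits of rt unchanged; the D-prefixed variants do the
// same on 64-bit values within the pos/size ranges asserted above.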
1544
1545
1546 void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
1547 // Move the data from fs to t8.
1548 mfc1(t8, fs);
1549 Cvt_d_uw(fd, t8);
1550 }
1551
1552
1553 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
1554 // Convert rs to a FP value in fd.
1555 DCHECK(!rs.is(t9));
1556 DCHECK(!rs.is(at));
1557
1558 // Zero extend int32 in rs.
1559 Dext(t9, rs, 0, 32);
1560 dmtc1(t9, fd);
1561 cvt_d_l(fd, fd);
1562 }
1563
1564
1565 void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
1566 // Move the data from fs to t8.
1567 dmfc1(t8, fs);
1568 Cvt_d_ul(fd, t8);
1569 }
1570
1571
1572 void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
1573 // Convert rs to a FP value in fd.
1574
1575 DCHECK(!rs.is(t9));
1576 DCHECK(!rs.is(at));
1577
1578 Label msb_clear, conversion_done;
1579
1580 Branch(&msb_clear, ge, rs, Operand(zero_reg));
1581
1582 // Rs >= 2^63
1583 andi(t9, rs, 1);
1584 dsrl(rs, rs, 1);
1585 or_(t9, t9, rs);
1586 dmtc1(t9, fd);
1587 cvt_d_l(fd, fd);
1588 Branch(USE_DELAY_SLOT, &conversion_done);
1589 add_d(fd, fd, fd); // In delay slot.
1590
1591 bind(&msb_clear);
1592 // Rs < 2^63, we can do simple conversion.
1593 dmtc1(rs, fd);
1594 cvt_d_l(fd, fd);
1595
1596 bind(&conversion_done);
1597 }
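
// The MSB-set path above uses the usual sticky-bit trick: for x >= 2^63,
// (double)x is computed as 2.0 * (double)((x >> 1) | (x & 1)); or-ing the
// discarded low bit back in keeps the round-to-nearest result identical to a
// direct unsigned conversion.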
1598
1599
1600 void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
1601 // Move the data from fs to t8.
1602 dmfc1(t8, fs);
1603 Cvt_s_ul(fd, t8);
1604 }
1605
1606
1607 void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
1608 // Convert rs to a FP value in fd.
1609
1610 DCHECK(!rs.is(t9));
1611 DCHECK(!rs.is(at));
1612
1613 Label positive, conversion_done;
1614
1615 Branch(&positive, ge, rs, Operand(zero_reg));
1616
1617 // Rs >= 2^63.
1618 andi(t9, rs, 1);
1619 dsrl(rs, rs, 1);
1620 or_(t9, t9, rs);
1621 dmtc1(t9, fd);
1622 cvt_s_l(fd, fd);
1623 Branch(USE_DELAY_SLOT, &conversion_done);
1624 add_s(fd, fd, fd); // In delay slot.
1625
1626 bind(&positive);
1627 // Rs < 2^63, we can do simple conversion.
1628 dmtc1(rs, fd);
1629 cvt_s_l(fd, fd);
1630
1631 bind(&conversion_done);
1632 }
1633
1634
1635 void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
1636 round_l_d(fd, fs);
1637 }
1638
1639
1640 void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
1641 floor_l_d(fd, fs);
1642 }
1643
1644
1645 void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
1646 ceil_l_d(fd, fs);
1647 }
1648
1649
1650 void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
1651 trunc_l_d(fd, fs);
1652 }
1653
1654
1655 void MacroAssembler::Trunc_l_ud(FPURegister fd,
1656 FPURegister fs,
1657 FPURegister scratch) {
1658 // Load to GPR.
1659 dmfc1(t8, fs);
1660 // Reset sign bit.
1661 li(at, 0x7fffffffffffffff);
1662 and_(t8, t8, at);
1663 dmtc1(t8, fs);
1664 trunc_l_d(fd, fs);
1665 }
1666
1667
1668 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1669 FPURegister fs,
1670 FPURegister scratch) {
1671 Trunc_uw_d(fs, t8, scratch);
1672 mtc1(t8, fd);
1673 }
1674
1675 void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
1676 FPURegister scratch, Register result) {
1677 Trunc_ul_d(fs, t8, scratch, result);
1678 dmtc1(t8, fd);
1679 }
1680
1681
1682 void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
1683 FPURegister scratch, Register result) {
1684 Trunc_ul_s(fs, t8, scratch, result);
1685 dmtc1(t8, fd);
1686 }
1687
1688
1689 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1690 trunc_w_d(fd, fs);
1691 }
1692
1693
1694 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1695 round_w_d(fd, fs);
1696 }
1697
1698
1699 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1700 floor_w_d(fd, fs);
1701 }
1702
1703
1704 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1705 ceil_w_d(fd, fs);
1706 }
1707
1708
1709 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1710 Register rs,
1711 FPURegister scratch) {
1712 DCHECK(!fd.is(scratch));
1713 DCHECK(!rs.is(at));
1714
1715 // Load 2^31 into scratch as its double representation.
1716 li(at, 0x41E00000);
1717 mtc1(zero_reg, scratch);
1718 mthc1(at, scratch);
1719 // Test if scratch > fd.
1720 // If fd < 2^31 we can convert it normally.
1721 Label simple_convert;
1722 BranchF(&simple_convert, NULL, lt, fd, scratch);
1723
1724 // First we subtract 2^31 from fd, then trunc it to rs
1725 // and add 2^31 to rs.
1726 sub_d(scratch, fd, scratch);
1727 trunc_w_d(scratch, scratch);
1728 mfc1(rs, scratch);
1729 Or(rs, rs, 1 << 31);
1730
1731 Label done;
1732 Branch(&done);
1733 // Simple conversion.
1734 bind(&simple_convert);
1735 trunc_w_d(scratch, fd);
1736 mfc1(rs, scratch);
1737
1738 bind(&done);
1739 }
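
// The non-simple path above handles inputs at or above 2^31: it subtracts
// 2^31 before the signed 32-bit truncation and then sets bit 31 of the
// integer result (the Or with 1 << 31), re-adding the 2^31 that was taken
// out.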
1740
1741
1742 void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
1743 FPURegister scratch, Register result) {
1744 DCHECK(!fd.is(scratch));
1745 DCHECK(!AreAliased(rs, result, at));
1746
1747 Label simple_convert, done, fail;
1748 if (result.is_valid()) {
1749 mov(result, zero_reg);
1750 Move(scratch, -1.0);
1751 // If fd <= -1 or is unordered, the conversion fails.
1752 BranchF(&fail, &fail, le, fd, scratch);
1753 }
1754
1755 // Load 2^63 into scratch as its double representation.
1756 li(at, 0x43e0000000000000);
1757 dmtc1(at, scratch);
1758
1759 // Test if scratch > fd.
1760 // If fd < 2^63 we can convert it normally.
1761 BranchF(&simple_convert, nullptr, lt, fd, scratch);
1762
1763 // First we subtract 2^63 from fd, then trunc it to rs
1764 // and add 2^63 to rs.
1765 sub_d(scratch, fd, scratch);
1766 trunc_l_d(scratch, scratch);
1767 dmfc1(rs, scratch);
1768 Or(rs, rs, Operand(1UL << 63));
1769 Branch(&done);
1770
1771 // Simple conversion.
1772 bind(&simple_convert);
1773 trunc_l_d(scratch, fd);
1774 dmfc1(rs, scratch);
1775
1776 bind(&done);
1777 if (result.is_valid()) {
1778 // The conversion failed if the result is negative.
1779 addiu(at, zero_reg, -1);
1780 dsrl(at, at, 1); // Load 2^63 - 1.
1781 dmfc1(result, scratch);
1782 xor_(result, result, at);
1783 Slt(result, zero_reg, result);
1784 }
1785
1786 bind(&fail);
1787 }
1788
1789
1790 void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
1791 FPURegister scratch, Register result) {
1792 DCHECK(!fd.is(scratch));
1793 DCHECK(!AreAliased(rs, result, at));
1794
1795 Label simple_convert, done, fail;
1796 if (result.is_valid()) {
1797 mov(result, zero_reg);
1798 Move(scratch, -1.0f);
1799 // If fd <= -1 or is unordered, the conversion fails.
1800 BranchF32(&fail, &fail, le, fd, scratch);
1801 }
1802
1803 // Load 2^63 into scratch as its float representation.
1804 li(at, 0x5f000000);
1805 mtc1(at, scratch);
1806
1807 // Test if scratch > fd.
1808 // If fd < 2^63 we can convert it normally.
1809 BranchF32(&simple_convert, nullptr, lt, fd, scratch);
1810
1811 // First we subtract 2^63 from fd, then trunc it to rs
1812 // and add 2^63 to rs.
1813 sub_s(scratch, fd, scratch);
1814 trunc_l_s(scratch, scratch);
1815 dmfc1(rs, scratch);
1816 Or(rs, rs, Operand(1UL << 63));
1817 Branch(&done);
1818
1819 // Simple conversion.
1820 bind(&simple_convert);
1821 trunc_l_s(scratch, fd);
1822 dmfc1(rs, scratch);
1823
1824 bind(&done);
1825 if (result.is_valid()) {
1826 // The conversion failed if the result is negative or unordered.
1827 addiu(at, zero_reg, -1);
1828 dsrl(at, at, 1); // Load 2^63 - 1.
1829 dmfc1(result, scratch);
1830 xor_(result, result, at);
1831 Slt(result, zero_reg, result);
1832 }
1833
1834 bind(&fail);
1835 }
1836
1837
1838 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
1839 FPURegister ft, FPURegister scratch) {
1840 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
1841 madd_d(fd, fr, fs, ft);
1842 } else {
1843 // Must not clobber the source registers' values.
1844 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
1845 mul_d(scratch, fs, ft);
1846 add_d(fd, fr, scratch);
1847 }
1848 }
1849
1850
1851 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
1852 Label* nan, Condition cond, FPURegister cmp1,
1853 FPURegister cmp2, BranchDelaySlot bd) {
1854 BlockTrampolinePoolScope block_trampoline_pool(this);
1855 if (cond == al) {
1856 Branch(bd, target);
1857 return;
1858 }
1859
1860 if (kArchVariant == kMips64r6) {
1861 sizeField = sizeField == D ? L : W;
1862 }
1863
1864 DCHECK(nan || target);
1865 // Check for unordered (NaN) cases.
1866 if (nan) {
1867 bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
1868 if (kArchVariant != kMips64r6) {
1869 if (long_branch) {
1870 Label skip;
1871 c(UN, sizeField, cmp1, cmp2);
1872 bc1f(&skip);
1873 nop();
1874 BranchLong(nan, bd);
1875 bind(&skip);
1876 } else {
1877 c(UN, sizeField, cmp1, cmp2);
1878 bc1t(nan);
1879 if (bd == PROTECT) {
1880 nop();
1881 }
1882 }
1883 } else {
1884 // Use kDoubleCompareReg for the comparison result. It has to be
1885 // unavailable to the Lithium register allocator.
1887 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1888 if (long_branch) {
1889 Label skip;
1890 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
1891 bc1eqz(&skip, kDoubleCompareReg);
1892 nop();
1893 BranchLong(nan, bd);
1894 bind(&skip);
1895 } else {
1896 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
1897 bc1nez(nan, kDoubleCompareReg);
1898 if (bd == PROTECT) {
1899 nop();
1900 }
1901 }
1902 }
1903 }
1904
1905 if (target) {
1906 bool long_branch =
1907         target->is_bound() ? !is_near(target) : is_trampoline_emitted();
1908 if (long_branch) {
1909 Label skip;
1910 Condition neg_cond = NegateFpuCondition(cond);
1911 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
1912 BranchLong(target, bd);
1913 bind(&skip);
1914 } else {
1915 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
1916 }
1917 }
1918 }
1919
1920
1921 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
1922 Condition cc, FPURegister cmp1,
1923 FPURegister cmp2, BranchDelaySlot bd) {
1924 if (kArchVariant != kMips64r6) {
1925 BlockTrampolinePoolScope block_trampoline_pool(this);
1926 if (target) {
1927 // Here NaN cases were either handled by this function or are assumed to
1928 // have been handled by the caller.
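      // Conditions without a direct FPU compare predicate are synthesized from
      // the complementary predicate and a branch-on-false, e.g. 'gt' uses
      // c(ULE) with bc1f.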
1929 switch (cc) {
1930 case lt:
1931 c(OLT, sizeField, cmp1, cmp2);
1932 bc1t(target);
1933 break;
1934 case ult:
1935 c(ULT, sizeField, cmp1, cmp2);
1936 bc1t(target);
1937 break;
1938 case gt:
1939 c(ULE, sizeField, cmp1, cmp2);
1940 bc1f(target);
1941 break;
1942 case ugt:
1943 c(OLE, sizeField, cmp1, cmp2);
1944 bc1f(target);
1945 break;
1946 case ge:
1947 c(ULT, sizeField, cmp1, cmp2);
1948 bc1f(target);
1949 break;
1950 case uge:
1951 c(OLT, sizeField, cmp1, cmp2);
1952 bc1f(target);
1953 break;
1954 case le:
1955 c(OLE, sizeField, cmp1, cmp2);
1956 bc1t(target);
1957 break;
1958 case ule:
1959 c(ULE, sizeField, cmp1, cmp2);
1960 bc1t(target);
1961 break;
1962 case eq:
1963 c(EQ, sizeField, cmp1, cmp2);
1964 bc1t(target);
1965 break;
1966 case ueq:
1967 c(UEQ, sizeField, cmp1, cmp2);
1968 bc1t(target);
1969 break;
1970 case ne: // Unordered or not equal.
1971 c(EQ, sizeField, cmp1, cmp2);
1972 bc1f(target);
1973 break;
1974 case ogl:
1975 c(UEQ, sizeField, cmp1, cmp2);
1976 bc1f(target);
1977 break;
1978 default:
1979 CHECK(0);
1980 }
1981 }
1982 } else {
1983 BlockTrampolinePoolScope block_trampoline_pool(this);
1984 if (target) {
1985 // Here NaN cases were either handled by this function or are assumed to
1986 // have been handled by the caller.
1987 // Unsigned conditions are treated as their signed counterpart.
1988       // Use kDoubleCompareReg for the comparison result; it is valid in fp64
1989       // (FR = 1) mode.
1990 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1991 switch (cc) {
1992 case lt:
1993 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1994 bc1nez(target, kDoubleCompareReg);
1995 break;
1996 case ult:
1997 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1998 bc1nez(target, kDoubleCompareReg);
1999 break;
2000 case gt:
2001 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2002 bc1eqz(target, kDoubleCompareReg);
2003 break;
2004 case ugt:
2005 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2006 bc1eqz(target, kDoubleCompareReg);
2007 break;
2008 case ge:
2009 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2010 bc1eqz(target, kDoubleCompareReg);
2011 break;
2012 case uge:
2013 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2014 bc1eqz(target, kDoubleCompareReg);
2015 break;
2016 case le:
2017 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2018 bc1nez(target, kDoubleCompareReg);
2019 break;
2020 case ule:
2021 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2022 bc1nez(target, kDoubleCompareReg);
2023 break;
2024 case eq:
2025 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2026 bc1nez(target, kDoubleCompareReg);
2027 break;
2028 case ueq:
2029 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2030 bc1nez(target, kDoubleCompareReg);
2031 break;
2032 case ne:
2033 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2034 bc1eqz(target, kDoubleCompareReg);
2035 break;
2036 case ogl:
2037 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2038 bc1eqz(target, kDoubleCompareReg);
2039 break;
2040 default:
2041 CHECK(0);
2042 }
2043 }
2044 }
2045
2046 if (bd == PROTECT) {
2047 nop();
2048 }
2049 }
2050
2051
2052 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2053 DCHECK(!src_low.is(at));
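  // Replace only the low 32 bits of dst: save the high word in at, write the
  // new low word, then restore the high word.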
2054 mfhc1(at, dst);
2055 mtc1(src_low, dst);
2056 mthc1(at, dst);
2057 }
2058
2059
2060 void MacroAssembler::Move(FPURegister dst, float imm) {
2061 li(at, Operand(bit_cast<int32_t>(imm)));
2062 mtc1(at, dst);
2063 }
2064
2065
2066 void MacroAssembler::Move(FPURegister dst, double imm) {
2067 static const DoubleRepresentation minus_zero(-0.0);
2068 static const DoubleRepresentation zero(0.0);
2069 DoubleRepresentation value_rep(imm);
2070 // Handle special values first.
2071 if (value_rep == zero && has_double_zero_reg_set_) {
2072 mov_d(dst, kDoubleRegZero);
2073 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
2074 neg_d(dst, kDoubleRegZero);
2075 } else {
2076 uint32_t lo, hi;
2077 DoubleAsTwoUInt32(imm, &lo, &hi);
2078 // Move the low part of the double into the lower bits of the corresponding
2079 // FPU register.
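    // Use the shortest load sequence: lui alone when the low 16 bits are zero,
    // ori alone when the value fits in 16 bits, otherwise lui + ori.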
2080 if (lo != 0) {
2081 if (!(lo & kImm16Mask)) {
2082 lui(at, (lo >> kLuiShift) & kImm16Mask);
2083 mtc1(at, dst);
2084 } else if (!(lo & kHiMask)) {
2085 ori(at, zero_reg, lo & kImm16Mask);
2086 mtc1(at, dst);
2087 } else {
2088 lui(at, (lo >> kLuiShift) & kImm16Mask);
2089 ori(at, at, lo & kImm16Mask);
2090 mtc1(at, dst);
2091 }
2092 } else {
2093 mtc1(zero_reg, dst);
2094 }
2095 // Move the high part of the double into the high bits of the corresponding
2096 // FPU register.
2097 if (hi != 0) {
2098 if (!(hi & kImm16Mask)) {
2099 lui(at, (hi >> kLuiShift) & kImm16Mask);
2100 mthc1(at, dst);
2101 } else if (!(hi & kHiMask)) {
2102 ori(at, zero_reg, hi & kImm16Mask);
2103 mthc1(at, dst);
2104 } else {
2105 lui(at, (hi >> kLuiShift) & kImm16Mask);
2106 ori(at, at, hi & kImm16Mask);
2107 mthc1(at, dst);
2108 }
2109 } else {
2110 mthc1(zero_reg, dst);
2111 }
2112 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
2113 }
2114 }
2115
2116
2117 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
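  // movz was removed in MIPS release 6, so on r6 emulate it with a branch
  // around the move; otherwise use the native instruction.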
2118 if (kArchVariant == kMips64r6) {
2119 Label done;
2120 Branch(&done, ne, rt, Operand(zero_reg));
2121 mov(rd, rs);
2122 bind(&done);
2123 } else {
2124 movz(rd, rs, rt);
2125 }
2126 }
2127
2128
2129 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2130 if (kArchVariant == kMips64r6) {
2131 Label done;
2132 Branch(&done, eq, rt, Operand(zero_reg));
2133 mov(rd, rs);
2134 bind(&done);
2135 } else {
2136 movn(rd, rs, rt);
2137 }
2138 }
2139
2140
2141 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2142 movt(rd, rs, cc);
2143 }
2144
2145
2146 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2147 movf(rd, rs, cc);
2148 }
2149
2150
2151 void MacroAssembler::Clz(Register rd, Register rs) {
2152 clz(rd, rs);
2153 }
2154
2155
2156 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2157 Register result,
2158 DoubleRegister double_input,
2159 Register scratch,
2160 DoubleRegister double_scratch,
2161 Register except_flag,
2162 CheckForInexactConversion check_inexact) {
2163 DCHECK(!result.is(scratch));
2164 DCHECK(!double_input.is(double_scratch));
2165 DCHECK(!except_flag.is(scratch));
2166
2167 Label done;
2168
2169 // Clear the except flag (0 = no exception)
2170 mov(except_flag, zero_reg);
2171
2172 // Test for values that can be exactly represented as a signed 32-bit integer.
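  // If converting to int32 and back reproduces the input exactly, the result
  // is already correct and no FCSR inspection is needed.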
2173 cvt_w_d(double_scratch, double_input);
2174 mfc1(result, double_scratch);
2175 cvt_d_w(double_scratch, double_scratch);
2176 BranchF(&done, NULL, eq, double_input, double_scratch);
2177
2178 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
2179
2180 if (check_inexact == kDontCheckForInexactConversion) {
2181 // Ignore inexact exceptions.
2182 except_mask &= ~kFCSRInexactFlagMask;
2183 }
2184
2185 // Save FCSR.
2186 cfc1(scratch, FCSR);
2187 // Disable FPU exceptions.
2188 ctc1(zero_reg, FCSR);
2189
2190 // Do operation based on rounding mode.
2191 switch (rounding_mode) {
2192 case kRoundToNearest:
2193 Round_w_d(double_scratch, double_input);
2194 break;
2195 case kRoundToZero:
2196 Trunc_w_d(double_scratch, double_input);
2197 break;
2198 case kRoundToPlusInf:
2199 Ceil_w_d(double_scratch, double_input);
2200 break;
2201 case kRoundToMinusInf:
2202 Floor_w_d(double_scratch, double_input);
2203 break;
2204 } // End of switch-statement.
2205
2206 // Retrieve FCSR.
2207 cfc1(except_flag, FCSR);
2208 // Restore FCSR.
2209 ctc1(scratch, FCSR);
2210 // Move the converted value into the result register.
2211 mfc1(result, double_scratch);
2212
2213 // Check for fpu exceptions.
2214 And(except_flag, except_flag, Operand(except_mask));
2215
2216 bind(&done);
2217 }
2218
2219
2220 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2221 DoubleRegister double_input,
2222 Label* done) {
2223 DoubleRegister single_scratch = kLithiumScratchDouble.low();
2224 Register scratch = at;
2225 Register scratch2 = t9;
2226
2227 // Clear cumulative exception flags and save the FCSR.
2228 cfc1(scratch2, FCSR);
2229 ctc1(zero_reg, FCSR);
2230 // Try a conversion to a signed integer.
2231 trunc_w_d(single_scratch, double_input);
2232 mfc1(result, single_scratch);
2233 // Retrieve and restore the FCSR.
2234 cfc1(scratch, FCSR);
2235 ctc1(scratch2, FCSR);
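  // For NaN and out-of-range inputs, trunc_w_d raises the invalid-operation
  // flag; any of the bits kept by the mask below therefore indicates that the
  // inline truncation result cannot be trusted.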
2236 // Check for overflow and NaNs.
2237 And(scratch,
2238 scratch,
2239 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2240 // If we had no exceptions we are done.
2241 Branch(done, eq, scratch, Operand(zero_reg));
2242 }
2243
2244
2245 void MacroAssembler::TruncateDoubleToI(Register result,
2246 DoubleRegister double_input) {
2247 Label done;
2248
2249 TryInlineTruncateDoubleToI(result, double_input, &done);
2250
2251   // If we fell through, the inline version did not succeed; call the stub instead.
2252 push(ra);
2253 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2254 sdc1(double_input, MemOperand(sp, 0));
2255
2256 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2257 CallStub(&stub);
2258
2259 Daddu(sp, sp, Operand(kDoubleSize));
2260 pop(ra);
2261
2262 bind(&done);
2263 }
2264
2265
2266 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2267 Label done;
2268 DoubleRegister double_scratch = f12;
2269 DCHECK(!result.is(object));
2270
2271 ldc1(double_scratch,
2272 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2273 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2274
2275   // If we fell through, the inline version did not succeed; call the stub instead.
2276 push(ra);
2277 DoubleToIStub stub(isolate(),
2278 object,
2279 result,
2280 HeapNumber::kValueOffset - kHeapObjectTag,
2281 true,
2282 true);
2283 CallStub(&stub);
2284 pop(ra);
2285
2286 bind(&done);
2287 }
2288
2289
2290 void MacroAssembler::TruncateNumberToI(Register object,
2291 Register result,
2292 Register heap_number_map,
2293 Register scratch,
2294 Label* not_number) {
2295 Label done;
2296 DCHECK(!result.is(object));
2297
2298 UntagAndJumpIfSmi(result, object, &done);
2299 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2300 TruncateHeapNumberToI(result, object);
2301
2302 bind(&done);
2303 }
2304
2305
2306 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2307 Register src,
2308 int num_least_bits) {
2309 // Ext(dst, src, kSmiTagSize, num_least_bits);
2310 SmiUntag(dst, src);
2311 And(dst, dst, Operand((1 << num_least_bits) - 1));
2312 }
2313
2314
2315 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2316 Register src,
2317 int num_least_bits) {
2318 DCHECK(!src.is(dst));
2319 And(dst, src, Operand((1 << num_least_bits) - 1));
2320 }
2321
2322
2323 // Emulated conditional branches do not emit a nop in the branch delay slot.
2324 //
2325 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2326 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
2327 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
2328 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2329
2330
2331 void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2332 DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
2333 BranchShort(offset, bdslot);
2334 }
2335
2336
2337 void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2338 const Operand& rt, BranchDelaySlot bdslot) {
2339 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2340 DCHECK(is_near);
2341 USE(is_near);
2342 }
2343
2344
2345 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2346 if (L->is_bound()) {
2347 if (is_near_branch(L)) {
2348 BranchShort(L, bdslot);
2349 } else {
2350 BranchLong(L, bdslot);
2351 }
2352 } else {
2353 if (is_trampoline_emitted()) {
2354 BranchLong(L, bdslot);
2355 } else {
2356 BranchShort(L, bdslot);
2357 }
2358 }
2359 }
2360
2361
2362 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2363 const Operand& rt,
2364 BranchDelaySlot bdslot) {
2365 if (L->is_bound()) {
2366 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
2367 if (cond != cc_always) {
2368 Label skip;
2369 Condition neg_cond = NegateCondition(cond);
2370 BranchShort(&skip, neg_cond, rs, rt);
2371 BranchLong(L, bdslot);
2372 bind(&skip);
2373 } else {
2374 BranchLong(L, bdslot);
2375 }
2376 }
2377 } else {
2378 if (is_trampoline_emitted()) {
2379 if (cond != cc_always) {
2380 Label skip;
2381 Condition neg_cond = NegateCondition(cond);
2382 BranchShort(&skip, neg_cond, rs, rt);
2383 BranchLong(L, bdslot);
2384 bind(&skip);
2385 } else {
2386 BranchLong(L, bdslot);
2387 }
2388 } else {
2389 BranchShort(L, cond, rs, rt, bdslot);
2390 }
2391 }
2392 }
2393
2394
2395 void MacroAssembler::Branch(Label* L,
2396 Condition cond,
2397 Register rs,
2398 Heap::RootListIndex index,
2399 BranchDelaySlot bdslot) {
2400 LoadRoot(at, index);
2401 Branch(L, cond, rs, Operand(at), bdslot);
2402 }
2403
2404
2405 void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2406 BranchDelaySlot bdslot) {
2407 DCHECK(L == nullptr || offset == 0);
2408 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2409 b(offset);
2410
2411 // Emit a nop in the branch delay slot if required.
2412 if (bdslot == PROTECT)
2413 nop();
2414 }
2415
2416
2417 void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2418 DCHECK(L == nullptr || offset == 0);
2419 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2420 bc(offset);
2421 }
2422
2423
2424 void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2425 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2426 DCHECK(is_int26(offset));
2427 BranchShortHelperR6(offset, nullptr);
2428 } else {
2429 DCHECK(is_int16(offset));
2430 BranchShortHelper(offset, nullptr, bdslot);
2431 }
2432 }
2433
2434
2435 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2436 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2437 BranchShortHelperR6(0, L);
2438 } else {
2439 BranchShortHelper(0, L, bdslot);
2440 }
2441 }
2442
2443
2444 static inline bool IsZero(const Operand& rt) {
2445 if (rt.is_reg()) {
2446 return rt.rm().is(zero_reg);
2447 } else {
2448 return rt.immediate() == 0;
2449 }
2450 }
2451
2452
2453 int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2454 if (L) {
2455 offset = branch_offset_helper(L, bits) >> 2;
2456 } else {
2457 DCHECK(is_intn(offset, bits));
2458 }
2459 return offset;
2460 }
2461
2462
2463 Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
2464 Register scratch) {
2465 Register r2 = no_reg;
2466 if (rt.is_reg()) {
2467 r2 = rt.rm_;
2468 } else {
2469 r2 = scratch;
2470 li(r2, rt);
2471 }
2472
2473 return r2;
2474 }
2475
2476
2477 bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2478 Condition cond, Register rs,
2479 const Operand& rt) {
2480 DCHECK(L == nullptr || offset == 0);
2481 Register scratch = rs.is(at) ? t8 : at;
2482 OffsetSize bits = OffsetSize::kOffset16;
2483
2484 // Be careful to always use shifted_branch_offset only just before the
2485   // branch instruction, as the location will be remembered for patching the
2486 // target.
2487 {
2488 BlockTrampolinePoolScope block_trampoline_pool(this);
2489 switch (cond) {
2490 case cc_always:
2491 bits = OffsetSize::kOffset26;
2492 if (!is_near(L, bits)) return false;
2493 offset = GetOffset(offset, L, bits);
2494 bc(offset);
2495 break;
2496 case eq:
2497 if (rs.code() == rt.rm_.reg_code) {
2498           // The pre-R6 beq is used here to keep the code patchable; bc has no
2499           // condition field and therefore cannot be patched.
2500 bits = OffsetSize::kOffset16;
2501 if (!is_near(L, bits)) return false;
2502 scratch = GetRtAsRegisterHelper(rt, scratch);
2503 offset = GetOffset(offset, L, bits);
2504 beq(rs, scratch, offset);
2505 nop();
2506 } else if (IsZero(rt)) {
2507 bits = OffsetSize::kOffset21;
2508 if (!is_near(L, bits)) return false;
2509 offset = GetOffset(offset, L, bits);
2510 beqzc(rs, offset);
2511 } else {
2512 // We don't want any other register but scratch clobbered.
2513 bits = OffsetSize::kOffset16;
2514 if (!is_near(L, bits)) return false;
2515 scratch = GetRtAsRegisterHelper(rt, scratch);
2516 offset = GetOffset(offset, L, bits);
2517 beqc(rs, scratch, offset);
2518 }
2519 break;
2520 case ne:
2521 if (rs.code() == rt.rm_.reg_code) {
2522           // The pre-R6 bne is used here to keep the code patchable; otherwise no
2523           // instruction would need to be emitted at all.
2524 bits = OffsetSize::kOffset16;
2525 if (!is_near(L, bits)) return false;
2526 scratch = GetRtAsRegisterHelper(rt, scratch);
2527 offset = GetOffset(offset, L, bits);
2528 bne(rs, scratch, offset);
2529 nop();
2530 } else if (IsZero(rt)) {
2531 bits = OffsetSize::kOffset21;
2532 if (!is_near(L, bits)) return false;
2533 offset = GetOffset(offset, L, bits);
2534 bnezc(rs, offset);
2535 } else {
2536 // We don't want any other register but scratch clobbered.
2537 bits = OffsetSize::kOffset16;
2538 if (!is_near(L, bits)) return false;
2539 scratch = GetRtAsRegisterHelper(rt, scratch);
2540 offset = GetOffset(offset, L, bits);
2541 bnec(rs, scratch, offset);
2542 }
2543 break;
2544
2545 // Signed comparison.
2546 case greater:
2547 // rs > rt
2548 if (rs.code() == rt.rm_.reg_code) {
2549 break; // No code needs to be emitted.
2550 } else if (rs.is(zero_reg)) {
2551 bits = OffsetSize::kOffset16;
2552 if (!is_near(L, bits)) return false;
2553 scratch = GetRtAsRegisterHelper(rt, scratch);
2554 offset = GetOffset(offset, L, bits);
2555 bltzc(scratch, offset);
2556 } else if (IsZero(rt)) {
2557 bits = OffsetSize::kOffset16;
2558 if (!is_near(L, bits)) return false;
2559 offset = GetOffset(offset, L, bits);
2560 bgtzc(rs, offset);
2561 } else {
2562 bits = OffsetSize::kOffset16;
2563 if (!is_near(L, bits)) return false;
2564 scratch = GetRtAsRegisterHelper(rt, scratch);
2565 DCHECK(!rs.is(scratch));
2566 offset = GetOffset(offset, L, bits);
2567 bltc(scratch, rs, offset);
2568 }
2569 break;
2570 case greater_equal:
2571 // rs >= rt
2572 if (rs.code() == rt.rm_.reg_code) {
2573 bits = OffsetSize::kOffset26;
2574 if (!is_near(L, bits)) return false;
2575 offset = GetOffset(offset, L, bits);
2576 bc(offset);
2577 } else if (rs.is(zero_reg)) {
2578 bits = OffsetSize::kOffset16;
2579 if (!is_near(L, bits)) return false;
2580 scratch = GetRtAsRegisterHelper(rt, scratch);
2581 offset = GetOffset(offset, L, bits);
2582 blezc(scratch, offset);
2583 } else if (IsZero(rt)) {
2584 bits = OffsetSize::kOffset16;
2585 if (!is_near(L, bits)) return false;
2586 offset = GetOffset(offset, L, bits);
2587 bgezc(rs, offset);
2588 } else {
2589 bits = OffsetSize::kOffset16;
2590 if (!is_near(L, bits)) return false;
2591 scratch = GetRtAsRegisterHelper(rt, scratch);
2592 DCHECK(!rs.is(scratch));
2593 offset = GetOffset(offset, L, bits);
2594 bgec(rs, scratch, offset);
2595 }
2596 break;
2597 case less:
2598 // rs < rt
2599 if (rs.code() == rt.rm_.reg_code) {
2600 break; // No code needs to be emitted.
2601 } else if (rs.is(zero_reg)) {
2602 bits = OffsetSize::kOffset16;
2603 if (!is_near(L, bits)) return false;
2604 scratch = GetRtAsRegisterHelper(rt, scratch);
2605 offset = GetOffset(offset, L, bits);
2606 bgtzc(scratch, offset);
2607 } else if (IsZero(rt)) {
2608 bits = OffsetSize::kOffset16;
2609 if (!is_near(L, bits)) return false;
2610 offset = GetOffset(offset, L, bits);
2611 bltzc(rs, offset);
2612 } else {
2613 bits = OffsetSize::kOffset16;
2614 if (!is_near(L, bits)) return false;
2615 scratch = GetRtAsRegisterHelper(rt, scratch);
2616 DCHECK(!rs.is(scratch));
2617 offset = GetOffset(offset, L, bits);
2618 bltc(rs, scratch, offset);
2619 }
2620 break;
2621 case less_equal:
2622 // rs <= rt
2623 if (rs.code() == rt.rm_.reg_code) {
2624 bits = OffsetSize::kOffset26;
2625 if (!is_near(L, bits)) return false;
2626 offset = GetOffset(offset, L, bits);
2627 bc(offset);
2628 } else if (rs.is(zero_reg)) {
2629 bits = OffsetSize::kOffset16;
2630 if (!is_near(L, bits)) return false;
2631 scratch = GetRtAsRegisterHelper(rt, scratch);
2632 offset = GetOffset(offset, L, bits);
2633 bgezc(scratch, offset);
2634 } else if (IsZero(rt)) {
2635 bits = OffsetSize::kOffset16;
2636 if (!is_near(L, bits)) return false;
2637 offset = GetOffset(offset, L, bits);
2638 blezc(rs, offset);
2639 } else {
2640 bits = OffsetSize::kOffset16;
2641 if (!is_near(L, bits)) return false;
2642 scratch = GetRtAsRegisterHelper(rt, scratch);
2643 DCHECK(!rs.is(scratch));
2644 offset = GetOffset(offset, L, bits);
2645 bgec(scratch, rs, offset);
2646 }
2647 break;
2648
2649 // Unsigned comparison.
2650 case Ugreater:
2651 // rs > rt
2652 if (rs.code() == rt.rm_.reg_code) {
2653 break; // No code needs to be emitted.
2654 } else if (rs.is(zero_reg)) {
2655 bits = OffsetSize::kOffset21;
2656 if (!is_near(L, bits)) return false;
2657 scratch = GetRtAsRegisterHelper(rt, scratch);
2658 offset = GetOffset(offset, L, bits);
2659 bnezc(scratch, offset);
2660 } else if (IsZero(rt)) {
2661 bits = OffsetSize::kOffset21;
2662 if (!is_near(L, bits)) return false;
2663 offset = GetOffset(offset, L, bits);
2664 bnezc(rs, offset);
2665 } else {
2666 bits = OffsetSize::kOffset16;
2667 if (!is_near(L, bits)) return false;
2668 scratch = GetRtAsRegisterHelper(rt, scratch);
2669 DCHECK(!rs.is(scratch));
2670 offset = GetOffset(offset, L, bits);
2671 bltuc(scratch, rs, offset);
2672 }
2673 break;
2674 case Ugreater_equal:
2675 // rs >= rt
2676 if (rs.code() == rt.rm_.reg_code) {
2677 bits = OffsetSize::kOffset26;
2678 if (!is_near(L, bits)) return false;
2679 offset = GetOffset(offset, L, bits);
2680 bc(offset);
2681 } else if (rs.is(zero_reg)) {
2682 bits = OffsetSize::kOffset21;
2683 if (!is_near(L, bits)) return false;
2684 scratch = GetRtAsRegisterHelper(rt, scratch);
2685 offset = GetOffset(offset, L, bits);
2686 beqzc(scratch, offset);
2687 } else if (IsZero(rt)) {
2688 bits = OffsetSize::kOffset26;
2689 if (!is_near(L, bits)) return false;
2690 offset = GetOffset(offset, L, bits);
2691 bc(offset);
2692 } else {
2693 bits = OffsetSize::kOffset16;
2694 if (!is_near(L, bits)) return false;
2695 scratch = GetRtAsRegisterHelper(rt, scratch);
2696 DCHECK(!rs.is(scratch));
2697 offset = GetOffset(offset, L, bits);
2698 bgeuc(rs, scratch, offset);
2699 }
2700 break;
2701 case Uless:
2702 // rs < rt
2703 if (rs.code() == rt.rm_.reg_code) {
2704 break; // No code needs to be emitted.
2705 } else if (rs.is(zero_reg)) {
2706 bits = OffsetSize::kOffset21;
2707 if (!is_near(L, bits)) return false;
2708 scratch = GetRtAsRegisterHelper(rt, scratch);
2709 offset = GetOffset(offset, L, bits);
2710 bnezc(scratch, offset);
2711 } else if (IsZero(rt)) {
2712 break; // No code needs to be emitted.
2713 } else {
2714 bits = OffsetSize::kOffset16;
2715 if (!is_near(L, bits)) return false;
2716 scratch = GetRtAsRegisterHelper(rt, scratch);
2717 DCHECK(!rs.is(scratch));
2718 offset = GetOffset(offset, L, bits);
2719 bltuc(rs, scratch, offset);
2720 }
2721 break;
2722 case Uless_equal:
2723 // rs <= rt
2724 if (rs.code() == rt.rm_.reg_code) {
2725 bits = OffsetSize::kOffset26;
2726 if (!is_near(L, bits)) return false;
2727 offset = GetOffset(offset, L, bits);
2728 bc(offset);
2729 } else if (rs.is(zero_reg)) {
2730 bits = OffsetSize::kOffset26;
2731 if (!is_near(L, bits)) return false;
2732 scratch = GetRtAsRegisterHelper(rt, scratch);
2733 offset = GetOffset(offset, L, bits);
2734 bc(offset);
2735 } else if (IsZero(rt)) {
2736 bits = OffsetSize::kOffset21;
2737 if (!is_near(L, bits)) return false;
2738 offset = GetOffset(offset, L, bits);
2739 beqzc(rs, offset);
2740 } else {
2741 bits = OffsetSize::kOffset16;
2742 if (!is_near(L, bits)) return false;
2743 scratch = GetRtAsRegisterHelper(rt, scratch);
2744 DCHECK(!rs.is(scratch));
2745 offset = GetOffset(offset, L, bits);
2746 bgeuc(scratch, rs, offset);
2747 }
2748 break;
2749 default:
2750 UNREACHABLE();
2751 }
2752 }
2753 CheckTrampolinePoolQuick(1);
2754 return true;
2755 }
2756
2757
2758 bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
2759 Register rs, const Operand& rt,
2760 BranchDelaySlot bdslot) {
2761 DCHECK(L == nullptr || offset == 0);
2762 if (!is_near(L, OffsetSize::kOffset16)) return false;
2763
2764 Register scratch = at;
2765 int32_t offset32;
2766
2767 // Be careful to always use shifted_branch_offset only just before the
2768   // branch instruction, as the location will be remembered for patching the
2769 // target.
2770 {
2771 BlockTrampolinePoolScope block_trampoline_pool(this);
2772 switch (cond) {
2773 case cc_always:
2774 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2775 b(offset32);
2776 break;
2777 case eq:
2778 if (IsZero(rt)) {
2779 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2780 beq(rs, zero_reg, offset32);
2781 } else {
2782 // We don't want any other register but scratch clobbered.
2783 scratch = GetRtAsRegisterHelper(rt, scratch);
2784 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2785 beq(rs, scratch, offset32);
2786 }
2787 break;
2788 case ne:
2789 if (IsZero(rt)) {
2790 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2791 bne(rs, zero_reg, offset32);
2792 } else {
2793 // We don't want any other register but scratch clobbered.
2794 scratch = GetRtAsRegisterHelper(rt, scratch);
2795 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2796 bne(rs, scratch, offset32);
2797 }
2798 break;
2799
2800 // Signed comparison.
2801 case greater:
2802 if (IsZero(rt)) {
2803 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2804 bgtz(rs, offset32);
2805 } else {
2806 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2807 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2808 bne(scratch, zero_reg, offset32);
2809 }
2810 break;
2811 case greater_equal:
2812 if (IsZero(rt)) {
2813 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2814 bgez(rs, offset32);
2815 } else {
2816 Slt(scratch, rs, rt);
2817 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2818 beq(scratch, zero_reg, offset32);
2819 }
2820 break;
2821 case less:
2822 if (IsZero(rt)) {
2823 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2824 bltz(rs, offset32);
2825 } else {
2826 Slt(scratch, rs, rt);
2827 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2828 bne(scratch, zero_reg, offset32);
2829 }
2830 break;
2831 case less_equal:
2832 if (IsZero(rt)) {
2833 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2834 blez(rs, offset32);
2835 } else {
2836 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2837 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2838 beq(scratch, zero_reg, offset32);
2839 }
2840 break;
2841
2842 // Unsigned comparison.
2843 case Ugreater:
2844 if (IsZero(rt)) {
2845 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2846 bne(rs, zero_reg, offset32);
2847 } else {
2848 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2849 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2850 bne(scratch, zero_reg, offset32);
2851 }
2852 break;
2853 case Ugreater_equal:
2854 if (IsZero(rt)) {
2855 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2856 b(offset32);
2857 } else {
2858 Sltu(scratch, rs, rt);
2859 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2860 beq(scratch, zero_reg, offset32);
2861 }
2862 break;
2863 case Uless:
2864 if (IsZero(rt)) {
2865 return true; // No code needs to be emitted.
2866 } else {
2867 Sltu(scratch, rs, rt);
2868 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2869 bne(scratch, zero_reg, offset32);
2870 }
2871 break;
2872 case Uless_equal:
2873 if (IsZero(rt)) {
2874 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2875 beq(rs, zero_reg, offset32);
2876 } else {
2877 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2878 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2879 beq(scratch, zero_reg, offset32);
2880 }
2881 break;
2882 default:
2883 UNREACHABLE();
2884 }
2885 }
2886
2887 // Emit a nop in the branch delay slot if required.
2888 if (bdslot == PROTECT)
2889 nop();
2890
2891 return true;
2892 }
2893
2894
2895 bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
2896 Register rs, const Operand& rt,
2897 BranchDelaySlot bdslot) {
2898 BRANCH_ARGS_CHECK(cond, rs, rt);
2899
2900 if (!L) {
2901 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2902 DCHECK(is_int26(offset));
2903 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
2904 } else {
2905 DCHECK(is_int16(offset));
2906 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
2907 }
2908 } else {
2909 DCHECK(offset == 0);
2910 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2911 return BranchShortHelperR6(0, L, cond, rs, rt);
2912 } else {
2913 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
2914 }
2915 }
2916 return false;
2917 }
2918
2919
2920 void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
2921 const Operand& rt, BranchDelaySlot bdslot) {
2922 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2923 }
2924
2925
2926 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2927 const Operand& rt, BranchDelaySlot bdslot) {
2928 BranchShortCheck(0, L, cond, rs, rt, bdslot);
2929 }
2930
2931
2932 void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
2933 BranchAndLinkShort(offset, bdslot);
2934 }
2935
2936
2937 void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
2938 const Operand& rt, BranchDelaySlot bdslot) {
2939 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2940 DCHECK(is_near);
2941 USE(is_near);
2942 }
2943
2944
2945 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2946 if (L->is_bound()) {
2947 if (is_near_branch(L)) {
2948 BranchAndLinkShort(L, bdslot);
2949 } else {
2950 BranchAndLinkLong(L, bdslot);
2951 }
2952 } else {
2953 if (is_trampoline_emitted()) {
2954 BranchAndLinkLong(L, bdslot);
2955 } else {
2956 BranchAndLinkShort(L, bdslot);
2957 }
2958 }
2959 }
2960
2961
2962 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2963 const Operand& rt,
2964 BranchDelaySlot bdslot) {
2965 if (L->is_bound()) {
2966 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
2967 Label skip;
2968 Condition neg_cond = NegateCondition(cond);
2969 BranchShort(&skip, neg_cond, rs, rt);
2970 BranchAndLinkLong(L, bdslot);
2971 bind(&skip);
2972 }
2973 } else {
2974 if (is_trampoline_emitted()) {
2975 Label skip;
2976 Condition neg_cond = NegateCondition(cond);
2977 BranchShort(&skip, neg_cond, rs, rt);
2978 BranchAndLinkLong(L, bdslot);
2979 bind(&skip);
2980 } else {
2981 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
2982 }
2983 }
2984 }
2985
2986
2987 void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
2988 BranchDelaySlot bdslot) {
2989 DCHECK(L == nullptr || offset == 0);
2990 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2991 bal(offset);
2992
2993 // Emit a nop in the branch delay slot if required.
2994 if (bdslot == PROTECT)
2995 nop();
2996 }
2997
2998
2999 void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3000 DCHECK(L == nullptr || offset == 0);
3001 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3002 balc(offset);
3003 }
3004
3005
3006 void MacroAssembler::BranchAndLinkShort(int32_t offset,
3007 BranchDelaySlot bdslot) {
3008 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3009 DCHECK(is_int26(offset));
3010 BranchAndLinkShortHelperR6(offset, nullptr);
3011 } else {
3012 DCHECK(is_int16(offset));
3013 BranchAndLinkShortHelper(offset, nullptr, bdslot);
3014 }
3015 }
3016
3017
3018 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3019 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3020 BranchAndLinkShortHelperR6(0, L);
3021 } else {
3022 BranchAndLinkShortHelper(0, L, bdslot);
3023 }
3024 }
3025
3026
3027 bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3028 Condition cond, Register rs,
3029 const Operand& rt) {
3030 DCHECK(L == nullptr || offset == 0);
3031 Register scratch = rs.is(at) ? t8 : at;
3032 OffsetSize bits = OffsetSize::kOffset16;
3033
3034 BlockTrampolinePoolScope block_trampoline_pool(this);
3035 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3036 switch (cond) {
3037 case cc_always:
3038 bits = OffsetSize::kOffset26;
3039 if (!is_near(L, bits)) return false;
3040 offset = GetOffset(offset, L, bits);
3041 balc(offset);
3042 break;
3043 case eq:
3044 if (!is_near(L, bits)) return false;
3045 Subu(scratch, rs, rt);
3046 offset = GetOffset(offset, L, bits);
3047 beqzalc(scratch, offset);
3048 break;
3049 case ne:
3050 if (!is_near(L, bits)) return false;
3051 Subu(scratch, rs, rt);
3052 offset = GetOffset(offset, L, bits);
3053 bnezalc(scratch, offset);
3054 break;
3055
3056 // Signed comparison.
3057 case greater:
3058 // rs > rt
3059 if (rs.code() == rt.rm_.reg_code) {
3060 break; // No code needs to be emitted.
3061 } else if (rs.is(zero_reg)) {
3062 if (!is_near(L, bits)) return false;
3063 scratch = GetRtAsRegisterHelper(rt, scratch);
3064 offset = GetOffset(offset, L, bits);
3065 bltzalc(scratch, offset);
3066 } else if (IsZero(rt)) {
3067 if (!is_near(L, bits)) return false;
3068 offset = GetOffset(offset, L, bits);
3069 bgtzalc(rs, offset);
3070 } else {
3071 if (!is_near(L, bits)) return false;
3072 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3073 offset = GetOffset(offset, L, bits);
3074 bnezalc(scratch, offset);
3075 }
3076 break;
3077 case greater_equal:
3078 // rs >= rt
3079 if (rs.code() == rt.rm_.reg_code) {
3080 bits = OffsetSize::kOffset26;
3081 if (!is_near(L, bits)) return false;
3082 offset = GetOffset(offset, L, bits);
3083 balc(offset);
3084 } else if (rs.is(zero_reg)) {
3085 if (!is_near(L, bits)) return false;
3086 scratch = GetRtAsRegisterHelper(rt, scratch);
3087 offset = GetOffset(offset, L, bits);
3088 blezalc(scratch, offset);
3089 } else if (IsZero(rt)) {
3090 if (!is_near(L, bits)) return false;
3091 offset = GetOffset(offset, L, bits);
3092 bgezalc(rs, offset);
3093 } else {
3094 if (!is_near(L, bits)) return false;
3095 Slt(scratch, rs, rt);
3096 offset = GetOffset(offset, L, bits);
3097 beqzalc(scratch, offset);
3098 }
3099 break;
3100 case less:
3101 // rs < rt
3102 if (rs.code() == rt.rm_.reg_code) {
3103 break; // No code needs to be emitted.
3104 } else if (rs.is(zero_reg)) {
3105 if (!is_near(L, bits)) return false;
3106 scratch = GetRtAsRegisterHelper(rt, scratch);
3107 offset = GetOffset(offset, L, bits);
3108 bgtzalc(scratch, offset);
3109 } else if (IsZero(rt)) {
3110 if (!is_near(L, bits)) return false;
3111 offset = GetOffset(offset, L, bits);
3112 bltzalc(rs, offset);
3113 } else {
3114 if (!is_near(L, bits)) return false;
3115 Slt(scratch, rs, rt);
3116 offset = GetOffset(offset, L, bits);
3117 bnezalc(scratch, offset);
3118 }
3119 break;
3120 case less_equal:
3121       // rs <= rt
3122 if (rs.code() == rt.rm_.reg_code) {
3123 bits = OffsetSize::kOffset26;
3124 if (!is_near(L, bits)) return false;
3125 offset = GetOffset(offset, L, bits);
3126 balc(offset);
3127 } else if (rs.is(zero_reg)) {
3128 if (!is_near(L, bits)) return false;
3129 scratch = GetRtAsRegisterHelper(rt, scratch);
3130 offset = GetOffset(offset, L, bits);
3131 bgezalc(scratch, offset);
3132 } else if (IsZero(rt)) {
3133 if (!is_near(L, bits)) return false;
3134 offset = GetOffset(offset, L, bits);
3135 blezalc(rs, offset);
3136 } else {
3137 if (!is_near(L, bits)) return false;
3138 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3139 offset = GetOffset(offset, L, bits);
3140 beqzalc(scratch, offset);
3141 }
3142 break;
3143
3144
3145 // Unsigned comparison.
3146 case Ugreater:
3147       // rs > rt
3148 if (!is_near(L, bits)) return false;
3149 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3150 offset = GetOffset(offset, L, bits);
3151 bnezalc(scratch, offset);
3152 break;
3153 case Ugreater_equal:
3154       // rs >= rt
3155 if (!is_near(L, bits)) return false;
3156 Sltu(scratch, rs, rt);
3157 offset = GetOffset(offset, L, bits);
3158 beqzalc(scratch, offset);
3159 break;
3160 case Uless:
3161       // rs < rt
3162 if (!is_near(L, bits)) return false;
3163 Sltu(scratch, rs, rt);
3164 offset = GetOffset(offset, L, bits);
3165 bnezalc(scratch, offset);
3166 break;
3167 case Uless_equal:
3168       // rs <= rt
3169 if (!is_near(L, bits)) return false;
3170 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3171 offset = GetOffset(offset, L, bits);
3172 beqzalc(scratch, offset);
3173 break;
3174 default:
3175 UNREACHABLE();
3176 }
3177 return true;
3178 }
3179
3180
3181 // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3182 // with the slt instructions. We could use sub or add instead but we would miss
3183 // overflow cases, so we keep slt and add an intermediate third instruction.
3184 bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3185 Condition cond, Register rs,
3186 const Operand& rt,
3187 BranchDelaySlot bdslot) {
3188 DCHECK(L == nullptr || offset == 0);
3189 if (!is_near(L, OffsetSize::kOffset16)) return false;
3190
3191 Register scratch = t8;
3192 BlockTrampolinePoolScope block_trampoline_pool(this);
3193
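  // The 0/1 result of Slt/Sltu below is decremented to -1/0 so that the
  // sign-testing bgezal/bltzal instructions can branch and link on it.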
3194 switch (cond) {
3195 case cc_always:
3196 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3197 bal(offset);
3198 break;
3199 case eq:
3200 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3201 nop();
3202 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3203 bal(offset);
3204 break;
3205 case ne:
3206 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3207 nop();
3208 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3209 bal(offset);
3210 break;
3211
3212 // Signed comparison.
3213 case greater:
3214 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3215 addiu(scratch, scratch, -1);
3216 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3217 bgezal(scratch, offset);
3218 break;
3219 case greater_equal:
3220 Slt(scratch, rs, rt);
3221 addiu(scratch, scratch, -1);
3222 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3223 bltzal(scratch, offset);
3224 break;
3225 case less:
3226 Slt(scratch, rs, rt);
3227 addiu(scratch, scratch, -1);
3228 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3229 bgezal(scratch, offset);
3230 break;
3231 case less_equal:
3232 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3233 addiu(scratch, scratch, -1);
3234 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3235 bltzal(scratch, offset);
3236 break;
3237
3238 // Unsigned comparison.
3239 case Ugreater:
3240 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3241 addiu(scratch, scratch, -1);
3242 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3243 bgezal(scratch, offset);
3244 break;
3245 case Ugreater_equal:
3246 Sltu(scratch, rs, rt);
3247 addiu(scratch, scratch, -1);
3248 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3249 bltzal(scratch, offset);
3250 break;
3251 case Uless:
3252 Sltu(scratch, rs, rt);
3253 addiu(scratch, scratch, -1);
3254 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3255 bgezal(scratch, offset);
3256 break;
3257 case Uless_equal:
3258 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3259 addiu(scratch, scratch, -1);
3260 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3261 bltzal(scratch, offset);
3262 break;
3263
3264 default:
3265 UNREACHABLE();
3266 }
3267
3268 // Emit a nop in the branch delay slot if required.
3269 if (bdslot == PROTECT)
3270 nop();
3271
3272 return true;
3273 }
3274
3275
3276 bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3277 Condition cond, Register rs,
3278 const Operand& rt,
3279 BranchDelaySlot bdslot) {
3280 BRANCH_ARGS_CHECK(cond, rs, rt);
3281
3282 if (!L) {
3283 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3284 DCHECK(is_int26(offset));
3285 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3286 } else {
3287 DCHECK(is_int16(offset));
3288 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3289 }
3290 } else {
3291 DCHECK(offset == 0);
3292 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3293 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3294 } else {
3295 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3296 }
3297 }
3298 return false;
3299 }
3300
3301
3302 void MacroAssembler::Jump(Register target,
3303 Condition cond,
3304 Register rs,
3305 const Operand& rt,
3306 BranchDelaySlot bd) {
3307 BlockTrampolinePoolScope block_trampoline_pool(this);
3308 if (cond == cc_always) {
3309 jr(target);
3310 } else {
3311 BRANCH_ARGS_CHECK(cond, rs, rt);
3312 Branch(2, NegateCondition(cond), rs, rt);
3313 jr(target);
3314 }
3315 // Emit a nop in the branch delay slot if required.
3316 if (bd == PROTECT)
3317 nop();
3318 }
3319
3320
3321 void MacroAssembler::Jump(intptr_t target,
3322 RelocInfo::Mode rmode,
3323 Condition cond,
3324 Register rs,
3325 const Operand& rt,
3326 BranchDelaySlot bd) {
3327 Label skip;
3328 if (cond != cc_always) {
3329 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3330 }
3331 // The first instruction of 'li' may be placed in the delay slot.
3332   // This is not an issue; t9 is expected to be clobbered anyway.
3333 li(t9, Operand(target, rmode));
3334 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3335 bind(&skip);
3336 }
3337
3338
3339 void MacroAssembler::Jump(Address target,
3340 RelocInfo::Mode rmode,
3341 Condition cond,
3342 Register rs,
3343 const Operand& rt,
3344 BranchDelaySlot bd) {
3345 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3346 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3347 }
3348
3349
3350 void MacroAssembler::Jump(Handle<Code> code,
3351 RelocInfo::Mode rmode,
3352 Condition cond,
3353 Register rs,
3354 const Operand& rt,
3355 BranchDelaySlot bd) {
3356 DCHECK(RelocInfo::IsCodeTarget(rmode));
3357 AllowDeferredHandleDereference embedding_raw_address;
3358 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3359 }
3360
3361
3362 int MacroAssembler::CallSize(Register target,
3363 Condition cond,
3364 Register rs,
3365 const Operand& rt,
3366 BranchDelaySlot bd) {
3367 int size = 0;
3368
3369 if (cond == cc_always) {
3370 size += 1;
3371 } else {
3372 size += 3;
3373 }
3374
3375 if (bd == PROTECT)
3376 size += 1;
3377
3378 return size * kInstrSize;
3379 }
3380
3381
3382 // Note: To call gcc-compiled C code on mips, you must call through t9.
3383 void MacroAssembler::Call(Register target,
3384 Condition cond,
3385 Register rs,
3386 const Operand& rt,
3387 BranchDelaySlot bd) {
3388 #ifdef DEBUG
3389 int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3390 #endif
3391
3392 BlockTrampolinePoolScope block_trampoline_pool(this);
3393 Label start;
3394 bind(&start);
3395 if (cond == cc_always) {
3396 jalr(target);
3397 } else {
3398 BRANCH_ARGS_CHECK(cond, rs, rt);
3399 Branch(2, NegateCondition(cond), rs, rt);
3400 jalr(target);
3401 }
3402 // Emit a nop in the branch delay slot if required.
3403 if (bd == PROTECT)
3404 nop();
3405
3406 #ifdef DEBUG
3407 CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3408 SizeOfCodeGeneratedSince(&start));
3409 #endif
3410 }
3411
3412
3413 int MacroAssembler::CallSize(Address target,
3414 RelocInfo::Mode rmode,
3415 Condition cond,
3416 Register rs,
3417 const Operand& rt,
3418 BranchDelaySlot bd) {
3419 int size = CallSize(t9, cond, rs, rt, bd);
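  // The address is materialized with li(..., ADDRESS_LOAD), which is emitted
  // as a fixed 4-instruction sequence; that is what the extra 4 * kInstrSize
  // accounts for.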
3420 return size + 4 * kInstrSize;
3421 }
3422
3423
3424 void MacroAssembler::Call(Address target,
3425 RelocInfo::Mode rmode,
3426 Condition cond,
3427 Register rs,
3428 const Operand& rt,
3429 BranchDelaySlot bd) {
3430 BlockTrampolinePoolScope block_trampoline_pool(this);
3431 Label start;
3432 bind(&start);
3433 int64_t target_int = reinterpret_cast<int64_t>(target);
3434   // Must record previous source positions before li() generates a new
3435   // code target.
3436 positions_recorder()->WriteRecordedPositions();
3437 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
3438 Call(t9, cond, rs, rt, bd);
3439 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3440 SizeOfCodeGeneratedSince(&start));
3441 }
3442
3443
3444 int MacroAssembler::CallSize(Handle<Code> code,
3445 RelocInfo::Mode rmode,
3446 TypeFeedbackId ast_id,
3447 Condition cond,
3448 Register rs,
3449 const Operand& rt,
3450 BranchDelaySlot bd) {
3451 AllowDeferredHandleDereference using_raw_address;
3452 return CallSize(reinterpret_cast<Address>(code.location()),
3453 rmode, cond, rs, rt, bd);
3454 }
3455
3456
3457 void MacroAssembler::Call(Handle<Code> code,
3458 RelocInfo::Mode rmode,
3459 TypeFeedbackId ast_id,
3460 Condition cond,
3461 Register rs,
3462 const Operand& rt,
3463 BranchDelaySlot bd) {
3464 BlockTrampolinePoolScope block_trampoline_pool(this);
3465 Label start;
3466 bind(&start);
3467 DCHECK(RelocInfo::IsCodeTarget(rmode));
3468 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3469 SetRecordedAstId(ast_id);
3470 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3471 }
3472 AllowDeferredHandleDereference embedding_raw_address;
3473 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3474 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3475 SizeOfCodeGeneratedSince(&start));
3476 }
3477
3478
3479 void MacroAssembler::Ret(Condition cond,
3480 Register rs,
3481 const Operand& rt,
3482 BranchDelaySlot bd) {
3483 Jump(ra, cond, rs, rt, bd);
3484 }
3485
3486
3487 void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3488 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3489 (!L->is_bound() || is_near_r6(L))) {
3490 BranchShortHelperR6(0, L);
3491 } else {
3492 EmitForbiddenSlotInstruction();
3493 BlockTrampolinePoolScope block_trampoline_pool(this);
3494 {
3495 BlockGrowBufferScope block_buf_growth(this);
3496 // Buffer growth (and relocation) must be blocked for internal references
3497 // until associated instructions are emitted and available to be patched.
3498 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3499 j(L);
3500 }
3501 // Emit a nop in the branch delay slot if required.
3502 if (bdslot == PROTECT) nop();
3503 }
3504 }
3505
3506
3507 void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
3508 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3509 (!L->is_bound() || is_near_r6(L))) {
3510 BranchAndLinkShortHelperR6(0, L);
3511 } else {
3512 EmitForbiddenSlotInstruction();
3513 BlockTrampolinePoolScope block_trampoline_pool(this);
3514 {
3515 BlockGrowBufferScope block_buf_growth(this);
3516 // Buffer growth (and relocation) must be blocked for internal references
3517 // until associated instructions are emitted and available to be patched.
3518 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3519 jal(L);
3520 }
3521 // Emit a nop in the branch delay slot if required.
3522 if (bdslot == PROTECT) nop();
3523 }
3524 }
3525
3526
3527 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3528 BlockTrampolinePoolScope block_trampoline_pool(this);
3529
3530 uint64_t imm64;
3531 imm64 = jump_address(L);
3532 { BlockGrowBufferScope block_buf_growth(this);
3533 // Buffer growth (and relocation) must be blocked for internal references
3534 // until associated instructions are emitted and available to be patched.
3535 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3536 li(at, Operand(imm64), ADDRESS_LOAD);
3537 }
3538 jr(at);
3539
3540 // Emit a nop in the branch delay slot if required.
3541 if (bdslot == PROTECT)
3542 nop();
3543 }
3544
3545
3546 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3547 BlockTrampolinePoolScope block_trampoline_pool(this);
3548
3549 uint64_t imm64;
3550 imm64 = jump_address(L);
3551 { BlockGrowBufferScope block_buf_growth(this);
3552 // Buffer growth (and relocation) must be blocked for internal references
3553 // until associated instructions are emitted and available to be patched.
3554 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3555 li(at, Operand(imm64), ADDRESS_LOAD);
3556 }
3557 jalr(at);
3558
3559 // Emit a nop in the branch delay slot if required.
3560 if (bdslot == PROTECT)
3561 nop();
3562 }
3563
3564
3565 void MacroAssembler::DropAndRet(int drop) {
3566 DCHECK(is_int16(drop * kPointerSize));
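  // The stack adjustment below is emitted into the return's branch delay slot,
  // so it still executes before control leaves the function.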
3567 Ret(USE_DELAY_SLOT);
3568 daddiu(sp, sp, drop * kPointerSize);
3569 }
3570
3571 void MacroAssembler::DropAndRet(int drop,
3572 Condition cond,
3573 Register r1,
3574 const Operand& r2) {
3575 // Both Drop and Ret need to be conditional.
3576 Label skip;
3577 if (cond != cc_always) {
3578 Branch(&skip, NegateCondition(cond), r1, r2);
3579 }
3580
3581 Drop(drop);
3582 Ret();
3583
3584 if (cond != cc_always) {
3585 bind(&skip);
3586 }
3587 }
3588
3589
3590 void MacroAssembler::Drop(int count,
3591 Condition cond,
3592 Register reg,
3593 const Operand& op) {
3594 if (count <= 0) {
3595 return;
3596 }
3597
3598 Label skip;
3599
3600 if (cond != al) {
3601 Branch(&skip, NegateCondition(cond), reg, op);
3602 }
3603
3604 Daddu(sp, sp, Operand(count * kPointerSize));
3605
3606 if (cond != al) {
3607 bind(&skip);
3608 }
3609 }
3610
3611
3612
3613 void MacroAssembler::Swap(Register reg1,
3614 Register reg2,
3615 Register scratch) {
3616 if (scratch.is(no_reg)) {
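    // No scratch register available: swap in place with the three-XOR trick
    // (this assumes reg1 and reg2 are distinct registers).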
3617 Xor(reg1, reg1, Operand(reg2));
3618 Xor(reg2, reg2, Operand(reg1));
3619 Xor(reg1, reg1, Operand(reg2));
3620 } else {
3621 mov(scratch, reg1);
3622 mov(reg1, reg2);
3623 mov(reg2, scratch);
3624 }
3625 }
3626
3627
3628 void MacroAssembler::Call(Label* target) {
3629 BranchAndLink(target);
3630 }
3631
3632
3633 void MacroAssembler::Push(Handle<Object> handle) {
3634 li(at, Operand(handle));
3635 push(at);
3636 }
3637
3638
3639 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
3640 DCHECK(!src.is(scratch));
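  // Split the 64-bit value into two halves and push each one in Smi form
  // (payload in the upper 32 bits): the high half first, then the low half.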
3641 mov(scratch, src);
3642 dsrl32(src, src, 0);
3643 dsll32(src, src, 0);
3644 push(src);
3645 dsll32(scratch, scratch, 0);
3646 push(scratch);
3647 }
3648
3649
3650 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
3651 DCHECK(!dst.is(scratch));
3652 pop(scratch);
3653 dsrl32(scratch, scratch, 0);
3654 pop(dst);
3655 dsrl32(dst, dst, 0);
3656 dsll32(dst, dst, 0);
3657 or_(dst, dst, scratch);
3658 }
3659
3660
3661 void MacroAssembler::DebugBreak() {
3662 PrepareCEntryArgs(0);
3663 PrepareCEntryFunction(
3664 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
3665 CEntryStub ces(isolate(), 1);
3666 DCHECK(AllowThisStubCall(&ces));
3667 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
3668 }
3669
3670
3671 // ---------------------------------------------------------------------------
3672 // Exception handling.
3673
PushStackHandler()3674 void MacroAssembler::PushStackHandler() {
3675   // Adjust this code if the stack handler layout asserted below changes.
3676 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3677 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3678
3679 // Link the current handler as the next handler.
3680 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3681 ld(a5, MemOperand(a6));
3682 push(a5);
3683
3684 // Set this new handler as the current one.
3685 sd(sp, MemOperand(a6));
3686 }
3687
3688
PopStackHandler()3689 void MacroAssembler::PopStackHandler() {
3690 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3691 pop(a1);
3692 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
3693 kPointerSize)));
3694 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3695 sd(a1, MemOperand(at));
3696 }
3697
3698
Allocate(int object_size,Register result,Register scratch1,Register scratch2,Label * gc_required,AllocationFlags flags)3699 void MacroAssembler::Allocate(int object_size,
3700 Register result,
3701 Register scratch1,
3702 Register scratch2,
3703 Label* gc_required,
3704 AllocationFlags flags) {
3705 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3706 if (!FLAG_inline_new) {
3707 if (emit_debug_code()) {
3708 // Trash the registers to simulate an allocation failure.
3709 li(result, 0x7091);
3710 li(scratch1, 0x7191);
3711 li(scratch2, 0x7291);
3712 }
3713 jmp(gc_required);
3714 return;
3715 }
3716
3717 DCHECK(!AreAliased(result, scratch1, scratch2, t9));
3718
3719 // Make object size into bytes.
3720 if ((flags & SIZE_IN_WORDS) != 0) {
3721 object_size *= kPointerSize;
3722 }
3723 DCHECK(0 == (object_size & kObjectAlignmentMask));
3724
3725 // Check relative positions of allocation top and limit addresses.
3726 // ARM adds additional checks to make sure the ldm instruction can be
3727 // used. On MIPS we don't have ldm so we don't need additional checks either.
3728 ExternalReference allocation_top =
3729 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3730 ExternalReference allocation_limit =
3731 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3732
3733 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
3734 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
3735 DCHECK((limit - top) == kPointerSize);
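  // Top and limit live in adjacent words, so one base register can reach
  // both: the limit is loaded at offset kPointerSize from the top address.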
3736
3737 // Set up allocation top address and allocation limit registers.
3738 Register top_address = scratch1;
3739 // This code stores a temporary value in t9.
3740 Register alloc_limit = t9;
3741 Register result_end = scratch2;
3742 li(top_address, Operand(allocation_top));
3743
3744 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3745 // Load allocation top into result and allocation limit into alloc_limit.
3746 ld(result, MemOperand(top_address));
3747 ld(alloc_limit, MemOperand(top_address, kPointerSize));
3748 } else {
3749 if (emit_debug_code()) {
3750 // Assert that result actually contains top on entry.
3751 ld(alloc_limit, MemOperand(top_address));
3752 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
3753 }
3754 // Load allocation limit. Result already contains allocation top.
3755 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
3756 }
3757
3758 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3759   // the same alignment on MIPS64.
3760 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3761
3762 if (emit_debug_code()) {
3763 And(at, result, Operand(kDoubleAlignmentMask));
3764 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3765 }
3766
3767 // Calculate new top and bail out if new space is exhausted. Use result
3768 // to calculate the new top.
3769 Daddu(result_end, result, Operand(object_size));
3770 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
3771 sd(result_end, MemOperand(top_address));
3772
3773 // Tag object if requested.
3774 if ((flags & TAG_OBJECT) != 0) {
3775 Daddu(result, result, Operand(kHeapObjectTag));
3776 }
3777 }
3778
3779
Allocate(Register object_size,Register result,Register result_end,Register scratch,Label * gc_required,AllocationFlags flags)3780 void MacroAssembler::Allocate(Register object_size, Register result,
3781 Register result_end, Register scratch,
3782 Label* gc_required, AllocationFlags flags) {
3783 if (!FLAG_inline_new) {
3784 if (emit_debug_code()) {
3785 // Trash the registers to simulate an allocation failure.
3786 li(result, 0x7091);
3787 li(scratch, 0x7191);
3788 li(result_end, 0x7291);
3789 }
3790 jmp(gc_required);
3791 return;
3792 }
3793
3794 // |object_size| and |result_end| may overlap, other registers must not.
3795 DCHECK(!AreAliased(object_size, result, scratch, t9));
3796 DCHECK(!AreAliased(result_end, result, scratch, t9));
3797
3798 // Check relative positions of allocation top and limit addresses.
3799 // ARM adds additional checks to make sure the ldm instruction can be
3800 // used. On MIPS we don't have ldm so we don't need additional checks either.
3801 ExternalReference allocation_top =
3802 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3803 ExternalReference allocation_limit =
3804 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3805 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
3806 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
3807 DCHECK((limit - top) == kPointerSize);
3808
3809 // Set up allocation top address and object size registers.
3810 Register top_address = scratch;
3811 // This code stores a temporary value in t9.
3812 Register alloc_limit = t9;
3813 li(top_address, Operand(allocation_top));
3814
3815 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3816 // Load allocation top into result and allocation limit into alloc_limit.
3817 ld(result, MemOperand(top_address));
3818 ld(alloc_limit, MemOperand(top_address, kPointerSize));
3819 } else {
3820 if (emit_debug_code()) {
3821 // Assert that result actually contains top on entry.
3822 ld(alloc_limit, MemOperand(top_address));
3823 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
3824 }
3825 // Load allocation limit. Result already contains allocation top.
3826 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
3827 }
3828
3829 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3830   // the same alignment on MIPS64.
3831 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3832
3833 if (emit_debug_code()) {
3834 And(at, result, Operand(kDoubleAlignmentMask));
3835 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3836 }
3837
3838 // Calculate new top and bail out if new space is exhausted. Use result
3839 // to calculate the new top. Object size may be in words so a shift is
3840 // required to get the number of bytes.
3841 if ((flags & SIZE_IN_WORDS) != 0) {
3842 dsll(result_end, object_size, kPointerSizeLog2);
3843 Daddu(result_end, result, result_end);
3844 } else {
3845 Daddu(result_end, result, Operand(object_size));
3846 }
3847 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
3848
3849 // Update allocation top. result temporarily holds the new top.
3850 if (emit_debug_code()) {
3851 And(at, result_end, Operand(kObjectAlignmentMask));
3852 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
3853 }
3854 sd(result_end, MemOperand(top_address));
3855
3856 // Tag object if requested.
3857 if ((flags & TAG_OBJECT) != 0) {
3858 Daddu(result, result, Operand(kHeapObjectTag));
3859 }
3860 }
3861
3862
AllocateTwoByteString(Register result,Register length,Register scratch1,Register scratch2,Register scratch3,Label * gc_required)3863 void MacroAssembler::AllocateTwoByteString(Register result,
3864 Register length,
3865 Register scratch1,
3866 Register scratch2,
3867 Register scratch3,
3868 Label* gc_required) {
3869   // Calculate the number of bytes needed for the characters in the string
3870   // while observing object alignment.
3871 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
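  // scratch1 = 2 * length + header size, rounded up to the next
  // object-aligned boundary by adding the alignment mask and clearing it.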
3872 dsll(scratch1, length, 1); // Length in bytes, not chars.
3873 daddiu(scratch1, scratch1,
3874 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3875 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3876
3877 // Allocate two-byte string in new space.
3878 Allocate(scratch1,
3879 result,
3880 scratch2,
3881 scratch3,
3882 gc_required,
3883 TAG_OBJECT);
3884
3885 // Set the map, length and hash field.
3886 InitializeNewString(result,
3887 length,
3888 Heap::kStringMapRootIndex,
3889 scratch1,
3890 scratch2);
3891 }
3892
3893
AllocateOneByteString(Register result,Register length,Register scratch1,Register scratch2,Register scratch3,Label * gc_required)3894 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3895 Register scratch1, Register scratch2,
3896 Register scratch3,
3897 Label* gc_required) {
3898 // Calculate the number of bytes needed for the characters in the string
3899 // while observing object alignment.
3900 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3901 DCHECK(kCharSize == 1);
3902 daddiu(scratch1, length,
3903 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3904 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3905
3906 // Allocate one-byte string in new space.
3907 Allocate(scratch1,
3908 result,
3909 scratch2,
3910 scratch3,
3911 gc_required,
3912 TAG_OBJECT);
3913
3914 // Set the map, length and hash field.
3915 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3916 scratch1, scratch2);
3917 }
3918
3919
AllocateTwoByteConsString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)3920 void MacroAssembler::AllocateTwoByteConsString(Register result,
3921 Register length,
3922 Register scratch1,
3923 Register scratch2,
3924 Label* gc_required) {
3925 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3926 TAG_OBJECT);
3927 InitializeNewString(result,
3928 length,
3929 Heap::kConsStringMapRootIndex,
3930 scratch1,
3931 scratch2);
3932 }
3933
3934
AllocateOneByteConsString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)3935 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3936 Register scratch1,
3937 Register scratch2,
3938 Label* gc_required) {
3939 Allocate(ConsString::kSize,
3940 result,
3941 scratch1,
3942 scratch2,
3943 gc_required,
3944 TAG_OBJECT);
3945
3946 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3947 scratch1, scratch2);
3948 }
3949
3950
AllocateTwoByteSlicedString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)3951 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3952 Register length,
3953 Register scratch1,
3954 Register scratch2,
3955 Label* gc_required) {
3956 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3957 TAG_OBJECT);
3958
3959 InitializeNewString(result,
3960 length,
3961 Heap::kSlicedStringMapRootIndex,
3962 scratch1,
3963 scratch2);
3964 }
3965
3966
AllocateOneByteSlicedString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)3967 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3968 Register length,
3969 Register scratch1,
3970 Register scratch2,
3971 Label* gc_required) {
3972 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3973 TAG_OBJECT);
3974
3975 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3976 scratch1, scratch2);
3977 }
3978
3979
JumpIfNotUniqueNameInstanceType(Register reg,Label * not_unique_name)3980 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3981 Label* not_unique_name) {
3982 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3983 Label succeed;
3984 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3985 Branch(&succeed, eq, at, Operand(zero_reg));
3986 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3987
3988 bind(&succeed);
3989 }
3990
3991
3992 // Allocates a heap number or jumps to the label if the young space is full and
3993 // a scavenge is needed.
AllocateHeapNumber(Register result,Register scratch1,Register scratch2,Register heap_number_map,Label * need_gc,TaggingMode tagging_mode,MutableMode mode)3994 void MacroAssembler::AllocateHeapNumber(Register result,
3995 Register scratch1,
3996 Register scratch2,
3997 Register heap_number_map,
3998 Label* need_gc,
3999 TaggingMode tagging_mode,
4000 MutableMode mode) {
4001 // Allocate an object in the heap for the heap number and tag it as a heap
4002 // object.
4003 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
4004 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
4005
4006 Heap::RootListIndex map_index = mode == MUTABLE
4007 ? Heap::kMutableHeapNumberMapRootIndex
4008 : Heap::kHeapNumberMapRootIndex;
4009 AssertIsRoot(heap_number_map, map_index);
4010
4011 // Store heap number map in the allocated object.
4012 if (tagging_mode == TAG_RESULT) {
4013 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
4014 } else {
4015 sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
4016 }
4017 }
4018
4019
AllocateHeapNumberWithValue(Register result,FPURegister value,Register scratch1,Register scratch2,Label * gc_required)4020 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4021 FPURegister value,
4022 Register scratch1,
4023 Register scratch2,
4024 Label* gc_required) {
4025 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4026 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4027 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4028 }
4029
4030
AllocateJSValue(Register result,Register constructor,Register value,Register scratch1,Register scratch2,Label * gc_required)4031 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4032 Register value, Register scratch1,
4033 Register scratch2, Label* gc_required) {
4034 DCHECK(!result.is(constructor));
4035 DCHECK(!result.is(scratch1));
4036 DCHECK(!result.is(scratch2));
4037 DCHECK(!result.is(value));
4038
4039 // Allocate JSValue in new space.
4040 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
4041
4042 // Initialize the JSValue.
4043 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4044 sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4045 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4046 sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4047 sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4048 sd(value, FieldMemOperand(result, JSValue::kValueOffset));
4049 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
4050 }
4051
4052
CopyBytes(Register src,Register dst,Register length,Register scratch)4053 void MacroAssembler::CopyBytes(Register src,
4054 Register dst,
4055 Register length,
4056 Register scratch) {
4057 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
4058
4059 // Align src before copying in word size chunks.
4060 Branch(&byte_loop, le, length, Operand(kPointerSize));
4061 bind(&align_loop_1);
4062 And(scratch, src, kPointerSize - 1);
4063 Branch(&word_loop, eq, scratch, Operand(zero_reg));
4064 lbu(scratch, MemOperand(src));
4065 Daddu(src, src, 1);
4066 sb(scratch, MemOperand(dst));
4067 Daddu(dst, dst, 1);
4068 Dsubu(length, length, Operand(1));
4069 Branch(&align_loop_1, ne, length, Operand(zero_reg));
4070
4071 // Copy bytes in word size chunks.
4072 bind(&word_loop);
4073 if (emit_debug_code()) {
4074 And(scratch, src, kPointerSize - 1);
4075 Assert(eq, kExpectingAlignmentForCopyBytes,
4076 scratch, Operand(zero_reg));
4077 }
4078 Branch(&byte_loop, lt, length, Operand(kPointerSize));
4079 ld(scratch, MemOperand(src));
4080 Daddu(src, src, kPointerSize);
4081
4082 // TODO(kalmard) check if this can be optimized to use sw in most cases.
4083 // Can't use unaligned access - copy byte by byte.
4084 if (kArchEndian == kLittle) {
4085 sb(scratch, MemOperand(dst, 0));
4086 dsrl(scratch, scratch, 8);
4087 sb(scratch, MemOperand(dst, 1));
4088 dsrl(scratch, scratch, 8);
4089 sb(scratch, MemOperand(dst, 2));
4090 dsrl(scratch, scratch, 8);
4091 sb(scratch, MemOperand(dst, 3));
4092 dsrl(scratch, scratch, 8);
4093 sb(scratch, MemOperand(dst, 4));
4094 dsrl(scratch, scratch, 8);
4095 sb(scratch, MemOperand(dst, 5));
4096 dsrl(scratch, scratch, 8);
4097 sb(scratch, MemOperand(dst, 6));
4098 dsrl(scratch, scratch, 8);
4099 sb(scratch, MemOperand(dst, 7));
4100 } else {
4101 sb(scratch, MemOperand(dst, 7));
4102 dsrl(scratch, scratch, 8);
4103 sb(scratch, MemOperand(dst, 6));
4104 dsrl(scratch, scratch, 8);
4105 sb(scratch, MemOperand(dst, 5));
4106 dsrl(scratch, scratch, 8);
4107 sb(scratch, MemOperand(dst, 4));
4108 dsrl(scratch, scratch, 8);
4109 sb(scratch, MemOperand(dst, 3));
4110 dsrl(scratch, scratch, 8);
4111 sb(scratch, MemOperand(dst, 2));
4112 dsrl(scratch, scratch, 8);
4113 sb(scratch, MemOperand(dst, 1));
4114 dsrl(scratch, scratch, 8);
4115 sb(scratch, MemOperand(dst, 0));
4116 }
4117 Daddu(dst, dst, 8);
4118
4119 Dsubu(length, length, Operand(kPointerSize));
4120 Branch(&word_loop);
4121
4122 // Copy the last bytes if any left.
4123 bind(&byte_loop);
4124 Branch(&done, eq, length, Operand(zero_reg));
4125 bind(&byte_loop_1);
4126 lbu(scratch, MemOperand(src));
4127 Daddu(src, src, 1);
4128 sb(scratch, MemOperand(dst));
4129 Daddu(dst, dst, 1);
4130 Dsubu(length, length, Operand(1));
4131 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
4132 bind(&done);
4133 }
4134
4135
InitializeFieldsWithFiller(Register current_address,Register end_address,Register filler)4136 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4137 Register end_address,
4138 Register filler) {
4139 Label loop, entry;
4140 Branch(&entry);
4141 bind(&loop);
4142 sd(filler, MemOperand(current_address));
4143 Daddu(current_address, current_address, kPointerSize);
4144 bind(&entry);
4145 Branch(&loop, ult, current_address, Operand(end_address));
4146 }
4147
4148
CheckFastElements(Register map,Register scratch,Label * fail)4149 void MacroAssembler::CheckFastElements(Register map,
4150 Register scratch,
4151 Label* fail) {
4152 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4153 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4154 STATIC_ASSERT(FAST_ELEMENTS == 2);
4155 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4156 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4157 Branch(fail, hi, scratch,
4158 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4159 }
4160
4161
CheckFastObjectElements(Register map,Register scratch,Label * fail)4162 void MacroAssembler::CheckFastObjectElements(Register map,
4163 Register scratch,
4164 Label* fail) {
4165 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4166 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4167 STATIC_ASSERT(FAST_ELEMENTS == 2);
4168 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4169 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4170 Branch(fail, ls, scratch,
4171 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4172 Branch(fail, hi, scratch,
4173 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4174 }
4175
4176
CheckFastSmiElements(Register map,Register scratch,Label * fail)4177 void MacroAssembler::CheckFastSmiElements(Register map,
4178 Register scratch,
4179 Label* fail) {
4180 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4181 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4182 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4183 Branch(fail, hi, scratch,
4184 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4185 }
4186
4187
StoreNumberToDoubleElements(Register value_reg,Register key_reg,Register elements_reg,Register scratch1,Register scratch2,Label * fail,int elements_offset)4188 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
4189 Register key_reg,
4190 Register elements_reg,
4191 Register scratch1,
4192 Register scratch2,
4193 Label* fail,
4194 int elements_offset) {
4195 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
4196 Label smi_value, done;
4197
4198 // Handle smi values specially.
4199 JumpIfSmi(value_reg, &smi_value);
4200
4201 // Ensure that the object is a heap number.
4202 CheckMap(value_reg,
4203 scratch1,
4204 Heap::kHeapNumberMapRootIndex,
4205 fail,
4206 DONT_DO_SMI_CHECK);
4207
4208   // Double value, turn potential sNaN into qNaN.
4209 DoubleRegister double_result = f0;
4210 DoubleRegister double_scratch = f2;
4211
4212 ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
4213 Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
4214 FPUCanonicalizeNaN(double_result, double_result);
4215
4216 bind(&smi_value);
4217 // Untag and transfer.
4218 dsrl32(scratch1, value_reg, 0);
4219 mtc1(scratch1, double_scratch);
4220 cvt_d_w(double_result, double_scratch);
4221
4222 bind(&done);
4223 Daddu(scratch1, elements_reg,
4224 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
4225 elements_offset));
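  // The key is a smi with its payload in the upper 32 bits; shifting right by
  // (32 - kDoubleSizeLog2) turns it directly into a byte offset (key * 8).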
4226 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
4227 Daddu(scratch1, scratch1, scratch2);
4228 // scratch1 is now effective address of the double element.
4229 sdc1(double_result, MemOperand(scratch1, 0));
4230 }
4231
4232
CompareMapAndBranch(Register obj,Register scratch,Handle<Map> map,Label * early_success,Condition cond,Label * branch_to)4233 void MacroAssembler::CompareMapAndBranch(Register obj,
4234 Register scratch,
4235 Handle<Map> map,
4236 Label* early_success,
4237 Condition cond,
4238 Label* branch_to) {
4239 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4240 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4241 }
4242
4243
CompareMapAndBranch(Register obj_map,Handle<Map> map,Label * early_success,Condition cond,Label * branch_to)4244 void MacroAssembler::CompareMapAndBranch(Register obj_map,
4245 Handle<Map> map,
4246 Label* early_success,
4247 Condition cond,
4248 Label* branch_to) {
4249 Branch(branch_to, cond, obj_map, Operand(map));
4250 }
4251
4252
CheckMap(Register obj,Register scratch,Handle<Map> map,Label * fail,SmiCheckType smi_check_type)4253 void MacroAssembler::CheckMap(Register obj,
4254 Register scratch,
4255 Handle<Map> map,
4256 Label* fail,
4257 SmiCheckType smi_check_type) {
4258 if (smi_check_type == DO_SMI_CHECK) {
4259 JumpIfSmi(obj, fail);
4260 }
4261 Label success;
4262 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
4263 bind(&success);
4264 }
4265
4266
DispatchWeakMap(Register obj,Register scratch1,Register scratch2,Handle<WeakCell> cell,Handle<Code> success,SmiCheckType smi_check_type)4267 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4268 Register scratch2, Handle<WeakCell> cell,
4269 Handle<Code> success,
4270 SmiCheckType smi_check_type) {
4271 Label fail;
4272 if (smi_check_type == DO_SMI_CHECK) {
4273 JumpIfSmi(obj, &fail);
4274 }
4275 ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4276 GetWeakValue(scratch2, cell);
4277 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
4278 bind(&fail);
4279 }
4280
4281
CheckMap(Register obj,Register scratch,Heap::RootListIndex index,Label * fail,SmiCheckType smi_check_type)4282 void MacroAssembler::CheckMap(Register obj,
4283 Register scratch,
4284 Heap::RootListIndex index,
4285 Label* fail,
4286 SmiCheckType smi_check_type) {
4287 if (smi_check_type == DO_SMI_CHECK) {
4288 JumpIfSmi(obj, fail);
4289 }
4290 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4291 LoadRoot(at, index);
4292 Branch(fail, ne, scratch, Operand(at));
4293 }
4294
4295
GetWeakValue(Register value,Handle<WeakCell> cell)4296 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4297 li(value, Operand(cell));
4298 ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
4299 }
4300
FPUCanonicalizeNaN(const DoubleRegister dst,const DoubleRegister src)4301 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4302 const DoubleRegister src) {
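  // Subtracting zero preserves ordinary values but turns a signaling NaN
  // into a quiet NaN, which canonicalizes the result.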
4303 sub_d(dst, src, kDoubleRegZero);
4304 }
4305
LoadWeakValue(Register value,Handle<WeakCell> cell,Label * miss)4306 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4307 Label* miss) {
4308 GetWeakValue(value, cell);
4309 JumpIfSmi(value, miss);
4310 }
4311
4312
MovFromFloatResult(const DoubleRegister dst)4313 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
4314 if (IsMipsSoftFloatABI) {
4315 if (kArchEndian == kLittle) {
4316 Move(dst, v0, v1);
4317 } else {
4318 Move(dst, v1, v0);
4319 }
4320 } else {
4321     Move(dst, f0); // Reg f0 is n64 ABI FP return value.
4322 }
4323 }
4324
4325
MovFromFloatParameter(const DoubleRegister dst)4326 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4327 if (IsMipsSoftFloatABI) {
4328 if (kArchEndian == kLittle) {
4329 Move(dst, a0, a1);
4330 } else {
4331 Move(dst, a1, a0);
4332 }
4333 } else {
4334 Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
4335 }
4336 }
4337
4338
MovToFloatParameter(DoubleRegister src)4339 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4340 if (!IsMipsSoftFloatABI) {
4341 Move(f12, src);
4342 } else {
4343 if (kArchEndian == kLittle) {
4344 Move(a0, a1, src);
4345 } else {
4346 Move(a1, a0, src);
4347 }
4348 }
4349 }
4350
4351
MovToFloatResult(DoubleRegister src)4352 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4353 if (!IsMipsSoftFloatABI) {
4354 Move(f0, src);
4355 } else {
4356 if (kArchEndian == kLittle) {
4357 Move(v0, v1, src);
4358 } else {
4359 Move(v1, v0, src);
4360 }
4361 }
4362 }
4363
4364
MovToFloatParameters(DoubleRegister src1,DoubleRegister src2)4365 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4366 DoubleRegister src2) {
4367 if (!IsMipsSoftFloatABI) {
4368 const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
4369 if (src2.is(f12)) {
4370 DCHECK(!src1.is(fparg2));
4371 Move(fparg2, src2);
4372 Move(f12, src1);
4373 } else {
4374 Move(f12, src1);
4375 Move(fparg2, src2);
4376 }
4377 } else {
4378 if (kArchEndian == kLittle) {
4379 Move(a0, a1, src1);
4380 Move(a2, a3, src2);
4381 } else {
4382 Move(a1, a0, src1);
4383 Move(a3, a2, src2);
4384 }
4385 }
4386 }
4387
4388
4389 // -----------------------------------------------------------------------------
4390 // JavaScript invokes.
4391
InvokePrologue(const ParameterCount & expected,const ParameterCount & actual,Label * done,bool * definitely_mismatches,InvokeFlag flag,const CallWrapper & call_wrapper)4392 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4393 const ParameterCount& actual,
4394 Label* done,
4395 bool* definitely_mismatches,
4396 InvokeFlag flag,
4397 const CallWrapper& call_wrapper) {
4398 bool definitely_matches = false;
4399 *definitely_mismatches = false;
4400 Label regular_invoke;
4401
4402 // Check whether the expected and actual arguments count match. If not,
4403   // set up registers according to the contract with ArgumentsAdaptorTrampoline:
4404 // a0: actual arguments count
4405 // a1: function (passed through to callee)
4406 // a2: expected arguments count
4407
4408 // The code below is made a lot easier because the calling code already sets
4409 // up actual and expected registers according to the contract if values are
4410 // passed in registers.
4411 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4412 DCHECK(expected.is_immediate() || expected.reg().is(a2));
4413
4414 if (expected.is_immediate()) {
4415 DCHECK(actual.is_immediate());
4416 li(a0, Operand(actual.immediate()));
4417 if (expected.immediate() == actual.immediate()) {
4418 definitely_matches = true;
4419 } else {
4420 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4421 if (expected.immediate() == sentinel) {
4422 // Don't worry about adapting arguments for builtins that
4423         // don't want that done. Skip adaptation code by making it look
4424 // like we have a match between expected and actual number of
4425 // arguments.
4426 definitely_matches = true;
4427 } else {
4428 *definitely_mismatches = true;
4429 li(a2, Operand(expected.immediate()));
4430 }
4431 }
4432 } else if (actual.is_immediate()) {
4433 li(a0, Operand(actual.immediate()));
4434     Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
4435   } else {
4436     Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4437 }
4438
4439 if (!definitely_matches) {
4440 Handle<Code> adaptor =
4441 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4442 if (flag == CALL_FUNCTION) {
4443 call_wrapper.BeforeCall(CallSize(adaptor));
4444 Call(adaptor);
4445 call_wrapper.AfterCall();
4446 if (!*definitely_mismatches) {
4447 Branch(done);
4448 }
4449 } else {
4450 Jump(adaptor, RelocInfo::CODE_TARGET);
4451 }
4452     bind(&regular_invoke);
4453 }
4454 }
4455
4456
FloodFunctionIfStepping(Register fun,Register new_target,const ParameterCount & expected,const ParameterCount & actual)4457 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
4458 const ParameterCount& expected,
4459 const ParameterCount& actual) {
4460 Label skip_flooding;
4461 ExternalReference step_in_enabled =
4462 ExternalReference::debug_step_in_enabled_address(isolate());
4463 li(t0, Operand(step_in_enabled));
4464 lb(t0, MemOperand(t0));
4465 Branch(&skip_flooding, eq, t0, Operand(zero_reg));
4466 {
4467 FrameScope frame(this,
4468 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4469 if (expected.is_reg()) {
4470 SmiTag(expected.reg());
4471 Push(expected.reg());
4472 }
4473 if (actual.is_reg()) {
4474 SmiTag(actual.reg());
4475 Push(actual.reg());
4476 }
4477 if (new_target.is_valid()) {
4478 Push(new_target);
4479 }
4480 Push(fun);
4481 Push(fun);
4482 CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
4483 Pop(fun);
4484 if (new_target.is_valid()) {
4485 Pop(new_target);
4486 }
4487 if (actual.is_reg()) {
4488 Pop(actual.reg());
4489 SmiUntag(actual.reg());
4490 }
4491 if (expected.is_reg()) {
4492 Pop(expected.reg());
4493 SmiUntag(expected.reg());
4494 }
4495 }
4496 bind(&skip_flooding);
4497 }
4498
4499
InvokeFunctionCode(Register function,Register new_target,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)4500 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4501 const ParameterCount& expected,
4502 const ParameterCount& actual,
4503 InvokeFlag flag,
4504 const CallWrapper& call_wrapper) {
4505 // You can't call a function without a valid frame.
4506 DCHECK(flag == JUMP_FUNCTION || has_frame());
4507 DCHECK(function.is(a1));
4508 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
4509
4510 if (call_wrapper.NeedsDebugStepCheck()) {
4511 FloodFunctionIfStepping(function, new_target, expected, actual);
4512 }
4513
4514 // Clear the new.target register if not given.
4515 if (!new_target.is_valid()) {
4516 LoadRoot(a3, Heap::kUndefinedValueRootIndex);
4517 }
4518
4519 Label done;
4520 bool definitely_mismatches = false;
4521 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
4522 call_wrapper);
4523 if (!definitely_mismatches) {
4524 // We call indirectly through the code field in the function to
4525 // allow recompilation to take effect without changing any of the
4526 // call sites.
4527 Register code = t0;
4528 ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4529 if (flag == CALL_FUNCTION) {
4530 call_wrapper.BeforeCall(CallSize(code));
4531 Call(code);
4532 call_wrapper.AfterCall();
4533 } else {
4534 DCHECK(flag == JUMP_FUNCTION);
4535 Jump(code);
4536 }
4537 // Continue here if InvokePrologue does handle the invocation due to
4538 // mismatched parameter counts.
4539 bind(&done);
4540 }
4541 }
4542
4543
InvokeFunction(Register function,Register new_target,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)4544 void MacroAssembler::InvokeFunction(Register function,
4545 Register new_target,
4546 const ParameterCount& actual,
4547 InvokeFlag flag,
4548 const CallWrapper& call_wrapper) {
4549 // You can't call a function without a valid frame.
4550 DCHECK(flag == JUMP_FUNCTION || has_frame());
4551
4552 // Contract with called JS functions requires that function is passed in a1.
4553 DCHECK(function.is(a1));
4554 Register expected_reg = a2;
4555 Register temp_reg = t0;
4556 ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4557 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4558 // The argument count is stored as int32_t on 64-bit platforms.
4559 // TODO(plind): Smi on 32-bit platforms.
4560 lw(expected_reg,
4561 FieldMemOperand(temp_reg,
4562 SharedFunctionInfo::kFormalParameterCountOffset));
4563 ParameterCount expected(expected_reg);
4564 InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
4565 }
4566
4567
InvokeFunction(Register function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)4568 void MacroAssembler::InvokeFunction(Register function,
4569 const ParameterCount& expected,
4570 const ParameterCount& actual,
4571 InvokeFlag flag,
4572 const CallWrapper& call_wrapper) {
4573 // You can't call a function without a valid frame.
4574 DCHECK(flag == JUMP_FUNCTION || has_frame());
4575
4576 // Contract with called JS functions requires that function is passed in a1.
4577 DCHECK(function.is(a1));
4578
4579 // Get the function and setup the context.
4580 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4581
4582 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
4583 }
4584
4585
InvokeFunction(Handle<JSFunction> function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)4586 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4587 const ParameterCount& expected,
4588 const ParameterCount& actual,
4589 InvokeFlag flag,
4590 const CallWrapper& call_wrapper) {
4591 li(a1, function);
4592 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4593 }
4594
4595
IsObjectJSStringType(Register object,Register scratch,Label * fail)4596 void MacroAssembler::IsObjectJSStringType(Register object,
4597 Register scratch,
4598 Label* fail) {
4599 DCHECK(kNotStringTag != 0);
4600
4601 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4602 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4603 And(scratch, scratch, Operand(kIsNotStringMask));
4604 Branch(fail, ne, scratch, Operand(zero_reg));
4605 }
4606
4607
IsObjectNameType(Register object,Register scratch,Label * fail)4608 void MacroAssembler::IsObjectNameType(Register object,
4609 Register scratch,
4610 Label* fail) {
4611 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4612 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4613 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4614 }
4615
4616
4617 // ---------------------------------------------------------------------------
4618 // Support functions.
4619
4620
GetMapConstructor(Register result,Register map,Register temp,Register temp2)4621 void MacroAssembler::GetMapConstructor(Register result, Register map,
4622 Register temp, Register temp2) {
4623 Label done, loop;
4624 ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
4625 bind(&loop);
4626 JumpIfSmi(result, &done);
4627 GetObjectType(result, temp, temp2);
4628 Branch(&done, ne, temp2, Operand(MAP_TYPE));
4629 ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
4630 Branch(&loop);
4631 bind(&done);
4632 }
4633
4634
TryGetFunctionPrototype(Register function,Register result,Register scratch,Label * miss)4635 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
4636 Register scratch, Label* miss) {
4637 // Get the prototype or initial map from the function.
4638 ld(result,
4639 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4640
4641 // If the prototype or initial map is the hole, don't return it and
4642 // simply miss the cache instead. This will allow us to allocate a
4643 // prototype object on-demand in the runtime system.
4644 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4645 Branch(miss, eq, result, Operand(t8));
4646
4647 // If the function does not have an initial map, we're done.
4648 Label done;
4649 GetObjectType(result, scratch, scratch);
4650 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4651
4652 // Get the prototype from the initial map.
4653 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
4654
4655 // All done.
4656 bind(&done);
4657 }
4658
4659
GetObjectType(Register object,Register map,Register type_reg)4660 void MacroAssembler::GetObjectType(Register object,
4661 Register map,
4662 Register type_reg) {
4663 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
4664 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4665 }
4666
4667
4668 // -----------------------------------------------------------------------------
4669 // Runtime calls.
4670
CallStub(CodeStub * stub,TypeFeedbackId ast_id,Condition cond,Register r1,const Operand & r2,BranchDelaySlot bd)4671 void MacroAssembler::CallStub(CodeStub* stub,
4672 TypeFeedbackId ast_id,
4673 Condition cond,
4674 Register r1,
4675 const Operand& r2,
4676 BranchDelaySlot bd) {
4677 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4678 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4679 cond, r1, r2, bd);
4680 }
4681
4682
TailCallStub(CodeStub * stub,Condition cond,Register r1,const Operand & r2,BranchDelaySlot bd)4683 void MacroAssembler::TailCallStub(CodeStub* stub,
4684 Condition cond,
4685 Register r1,
4686 const Operand& r2,
4687 BranchDelaySlot bd) {
4688 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4689 }
4690
4691
AllowThisStubCall(CodeStub * stub)4692 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4693 return has_frame_ || !stub->SometimesSetsUpAFrame();
4694 }
4695
4696
IndexFromHash(Register hash,Register index)4697 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4698   // If the hash field contains an array index, pick it out. The assert checks
4699   // that the constant for the maximum number of digits of an array index
4700   // cached in the hash field and the number of bits reserved for it do not
4701   // conflict.
4702 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4703 (1 << String::kArrayIndexValueBits));
4704 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4705 }
4706
4707
ObjectToDoubleFPURegister(Register object,FPURegister result,Register scratch1,Register scratch2,Register heap_number_map,Label * not_number,ObjectToDoubleFlags flags)4708 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4709 FPURegister result,
4710 Register scratch1,
4711 Register scratch2,
4712 Register heap_number_map,
4713 Label* not_number,
4714 ObjectToDoubleFlags flags) {
4715 Label done;
4716 if ((flags & OBJECT_NOT_SMI) == 0) {
4717 Label not_smi;
4718     JumpIfNotSmi(object, &not_smi);
4719 // Remove smi tag and convert to double.
4720 // dsra(scratch1, object, kSmiTagSize);
4721 dsra32(scratch1, object, 0);
4722 mtc1(scratch1, result);
4723 cvt_d_w(result, result);
4724 Branch(&done);
4725     bind(&not_smi);
4726 }
4727 // Check for heap number and load double value from it.
4728 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4729 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4730
4731 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4732 // If exponent is all ones the number is either a NaN or +/-Infinity.
4733 Register exponent = scratch1;
4734 Register mask_reg = scratch2;
4735 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4736 li(mask_reg, HeapNumber::kExponentMask);
4737
4738 And(exponent, exponent, mask_reg);
4739 Branch(not_number, eq, exponent, Operand(mask_reg));
4740 }
4741 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4742 bind(&done);
4743 }
4744
4745
SmiToDoubleFPURegister(Register smi,FPURegister value,Register scratch1)4746 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4747 FPURegister value,
4748 Register scratch1) {
4749 dsra32(scratch1, smi, 0);
4750 mtc1(scratch1, value);
4751 cvt_d_w(value, value);
4752 }
4753
4754
AdduAndCheckForOverflow(Register dst,Register left,const Operand & right,Register overflow_dst,Register scratch)4755 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4756 const Operand& right,
4757 Register overflow_dst,
4758 Register scratch) {
4759 if (right.is_reg()) {
4760 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4761 } else {
4762 if (dst.is(left)) {
4763 li(t9, right); // Load right.
4764 mov(scratch, left); // Preserve left.
4765 addu(dst, left, t9); // Left is overwritten.
4766 xor_(scratch, dst, scratch); // Original left.
4767 xor_(overflow_dst, dst, t9);
4768 and_(overflow_dst, overflow_dst, scratch);
4769 } else {
4770 li(t9, right);
4771 addu(dst, left, t9);
4772 xor_(overflow_dst, dst, left);
4773 xor_(scratch, dst, t9);
4774 and_(overflow_dst, scratch, overflow_dst);
4775 }
4776 }
4777 }
4778
4779
AdduAndCheckForOverflow(Register dst,Register left,Register right,Register overflow_dst,Register scratch)4780 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4781 Register right,
4782 Register overflow_dst,
4783 Register scratch) {
4784 DCHECK(!dst.is(overflow_dst));
4785 DCHECK(!dst.is(scratch));
4786 DCHECK(!overflow_dst.is(scratch));
4787 DCHECK(!overflow_dst.is(left));
4788 DCHECK(!overflow_dst.is(right));
4789
4790 if (left.is(right) && dst.is(left)) {
4791 DCHECK(!dst.is(t9));
4792 DCHECK(!scratch.is(t9));
4793 DCHECK(!left.is(t9));
4794 DCHECK(!right.is(t9));
4795 DCHECK(!overflow_dst.is(t9));
4796 mov(t9, right);
4797 right = t9;
4798 }
4799
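  // Signed overflow of an addition happens exactly when both operands have
  // the same sign and the result has the opposite sign, i.e. when the sign
  // bit of (dst ^ left) & (dst ^ right) is set. overflow_dst therefore ends
  // up negative on overflow and non-negative otherwise.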
4800 if (dst.is(left)) {
4801 mov(scratch, left); // Preserve left.
4802 addu(dst, left, right); // Left is overwritten.
4803 xor_(scratch, dst, scratch); // Original left.
4804 xor_(overflow_dst, dst, right);
4805 and_(overflow_dst, overflow_dst, scratch);
4806 } else if (dst.is(right)) {
4807 mov(scratch, right); // Preserve right.
4808 addu(dst, left, right); // Right is overwritten.
4809 xor_(scratch, dst, scratch); // Original right.
4810 xor_(overflow_dst, dst, left);
4811 and_(overflow_dst, overflow_dst, scratch);
4812 } else {
4813 addu(dst, left, right);
4814 xor_(overflow_dst, dst, left);
4815 xor_(scratch, dst, right);
4816 and_(overflow_dst, scratch, overflow_dst);
4817 }
4818 }
4819
4820
DadduAndCheckForOverflow(Register dst,Register left,const Operand & right,Register overflow_dst,Register scratch)4821 void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
4822 const Operand& right,
4823 Register overflow_dst,
4824 Register scratch) {
4825 if (right.is_reg()) {
4826 DadduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4827 } else {
4828 if (dst.is(left)) {
4829 li(t9, right); // Load right.
4830 mov(scratch, left); // Preserve left.
4831 daddu(dst, left, t9); // Left is overwritten.
4832 xor_(scratch, dst, scratch); // Original left.
4833 xor_(overflow_dst, dst, t9);
4834 and_(overflow_dst, overflow_dst, scratch);
4835 } else {
4836 li(t9, right); // Load right.
4837 Daddu(dst, left, t9);
4838 xor_(overflow_dst, dst, left);
4839 xor_(scratch, dst, t9);
4840 and_(overflow_dst, scratch, overflow_dst);
4841 }
4842 }
4843 }
4844
4845
DadduAndCheckForOverflow(Register dst,Register left,Register right,Register overflow_dst,Register scratch)4846 void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
4847 Register right,
4848 Register overflow_dst,
4849 Register scratch) {
4850 DCHECK(!dst.is(overflow_dst));
4851 DCHECK(!dst.is(scratch));
4852 DCHECK(!overflow_dst.is(scratch));
4853 DCHECK(!overflow_dst.is(left));
4854 DCHECK(!overflow_dst.is(right));
4855
4856 if (left.is(right) && dst.is(left)) {
4857 DCHECK(!dst.is(t9));
4858 DCHECK(!scratch.is(t9));
4859 DCHECK(!left.is(t9));
4860 DCHECK(!right.is(t9));
4861 DCHECK(!overflow_dst.is(t9));
4862 mov(t9, right);
4863 right = t9;
4864 }
4865
4866 if (dst.is(left)) {
4867 mov(scratch, left); // Preserve left.
4868 daddu(dst, left, right); // Left is overwritten.
4869 xor_(scratch, dst, scratch); // Original left.
4870 xor_(overflow_dst, dst, right);
4871 and_(overflow_dst, overflow_dst, scratch);
4872 } else if (dst.is(right)) {
4873 mov(scratch, right); // Preserve right.
4874 daddu(dst, left, right); // Right is overwritten.
4875 xor_(scratch, dst, scratch); // Original right.
4876 xor_(overflow_dst, dst, left);
4877 and_(overflow_dst, overflow_dst, scratch);
4878 } else {
4879 daddu(dst, left, right);
4880 xor_(overflow_dst, dst, left);
4881 xor_(scratch, dst, right);
4882 and_(overflow_dst, scratch, overflow_dst);
4883 }
4884 }
4885
4886
BranchOvfHelper(MacroAssembler * masm,Register overflow_dst,Label * overflow_label,Label * no_overflow_label)4887 static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
4888 Label* overflow_label,
4889 Label* no_overflow_label) {
4890 DCHECK(overflow_label || no_overflow_label);
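  // overflow_dst is negative exactly when the preceding arithmetic
  // overflowed, so branch on its sign.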
4891 if (!overflow_label) {
4892 DCHECK(no_overflow_label);
4893 masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
4894 } else {
4895 masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
4896 if (no_overflow_label) masm->Branch(no_overflow_label);
4897 }
4898 }
4899
4900
DaddBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)4901 void MacroAssembler::DaddBranchOvf(Register dst, Register left,
4902 const Operand& right, Label* overflow_label,
4903 Label* no_overflow_label, Register scratch) {
4904 if (right.is_reg()) {
4905 DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
4906 scratch);
4907 } else {
4908 Register overflow_dst = t9;
4909 DCHECK(!dst.is(scratch));
4910 DCHECK(!dst.is(overflow_dst));
4911 DCHECK(!scratch.is(overflow_dst));
4912 DCHECK(!left.is(overflow_dst));
4913 li(overflow_dst, right); // Load right.
4914 if (dst.is(left)) {
4915 mov(scratch, left); // Preserve left.
4916 Daddu(dst, left, overflow_dst); // Left is overwritten.
4917 xor_(scratch, dst, scratch); // Original left.
4918 xor_(overflow_dst, dst, overflow_dst);
4919 and_(overflow_dst, overflow_dst, scratch);
4920 } else {
4921 Daddu(dst, left, overflow_dst);
4922 xor_(scratch, dst, overflow_dst);
4923 xor_(overflow_dst, dst, left);
4924 and_(overflow_dst, scratch, overflow_dst);
4925 }
4926 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
4927 }
4928 }
4929
4930
DaddBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)4931 void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
4932 Label* overflow_label,
4933 Label* no_overflow_label, Register scratch) {
4934 Register overflow_dst = t9;
4935 DCHECK(!dst.is(scratch));
4936 DCHECK(!dst.is(overflow_dst));
4937 DCHECK(!scratch.is(overflow_dst));
4938 DCHECK(!left.is(overflow_dst));
4939 DCHECK(!right.is(overflow_dst));
4940 DCHECK(!left.is(scratch));
4941 DCHECK(!right.is(scratch));
4942
4943 if (left.is(right) && dst.is(left)) {
4944 mov(overflow_dst, right);
4945 right = overflow_dst;
4946 }
4947
4948 if (dst.is(left)) {
4949 mov(scratch, left); // Preserve left.
4950 daddu(dst, left, right); // Left is overwritten.
4951 xor_(scratch, dst, scratch); // Original left.
4952 xor_(overflow_dst, dst, right);
4953 and_(overflow_dst, overflow_dst, scratch);
4954 } else if (dst.is(right)) {
4955 mov(scratch, right); // Preserve right.
4956 daddu(dst, left, right); // Right is overwritten.
4957 xor_(scratch, dst, scratch); // Original right.
4958 xor_(overflow_dst, dst, left);
4959 and_(overflow_dst, overflow_dst, scratch);
4960 } else {
4961 daddu(dst, left, right);
4962 xor_(overflow_dst, dst, left);
4963 xor_(scratch, dst, right);
4964 and_(overflow_dst, scratch, overflow_dst);
4965 }
4966 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
4967 }
4968
4969
SubuAndCheckForOverflow(Register dst,Register left,const Operand & right,Register overflow_dst,Register scratch)4970 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4971 const Operand& right,
4972 Register overflow_dst,
4973 Register scratch) {
4974 if (right.is_reg()) {
4975 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4976 } else {
4977 if (dst.is(left)) {
4978 li(t9, right); // Load right.
4979 mov(scratch, left); // Preserve left.
4980 Subu(dst, left, t9); // Left is overwritten.
4981 xor_(overflow_dst, dst, scratch); // scratch is original left.
4982 xor_(scratch, scratch, t9); // scratch is original left.
4983 and_(overflow_dst, scratch, overflow_dst);
4984 } else {
4985 li(t9, right);
4986 subu(dst, left, t9);
4987 xor_(overflow_dst, dst, left);
4988 xor_(scratch, left, t9);
4989 and_(overflow_dst, scratch, overflow_dst);
4990 }
4991 }
4992 }
4993
4994
SubuAndCheckForOverflow(Register dst,Register left,Register right,Register overflow_dst,Register scratch)4995 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4996 Register right,
4997 Register overflow_dst,
4998 Register scratch) {
4999 DCHECK(!dst.is(overflow_dst));
5000 DCHECK(!dst.is(scratch));
5001 DCHECK(!overflow_dst.is(scratch));
5002 DCHECK(!overflow_dst.is(left));
5003 DCHECK(!overflow_dst.is(right));
5004 DCHECK(!scratch.is(left));
5005 DCHECK(!scratch.is(right));
5006
5007 // This happens with some crankshaft code. Since Subu works fine if
5008 // left == right, let's not make that restriction here.
5009 if (left.is(right)) {
5010 mov(dst, zero_reg);
5011 mov(overflow_dst, zero_reg);
5012 return;
5013 }
5014
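  // Signed overflow of a subtraction happens exactly when the operands have
  // different signs and the result's sign differs from the left operand,
  // i.e. when the sign bit of (dst ^ left) & (left ^ right) is set.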
5015 if (dst.is(left)) {
5016 mov(scratch, left); // Preserve left.
5017 subu(dst, left, right); // Left is overwritten.
5018 xor_(overflow_dst, dst, scratch); // scratch is original left.
5019 xor_(scratch, scratch, right); // scratch is original left.
5020 and_(overflow_dst, scratch, overflow_dst);
5021 } else if (dst.is(right)) {
5022 mov(scratch, right); // Preserve right.
5023 subu(dst, left, right); // Right is overwritten.
5024 xor_(overflow_dst, dst, left);
5025 xor_(scratch, left, scratch); // Original right.
5026 and_(overflow_dst, scratch, overflow_dst);
5027 } else {
5028 subu(dst, left, right);
5029 xor_(overflow_dst, dst, left);
5030 xor_(scratch, left, right);
5031 and_(overflow_dst, scratch, overflow_dst);
5032 }
5033 }
5034
5035
DsubuAndCheckForOverflow(Register dst,Register left,const Operand & right,Register overflow_dst,Register scratch)5036 void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
5037 const Operand& right,
5038 Register overflow_dst,
5039 Register scratch) {
5040 if (right.is_reg()) {
5041 DsubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
5042 } else {
5043 if (dst.is(left)) {
5044 li(t9, right); // Load right.
5045 mov(scratch, left); // Preserve left.
5046 dsubu(dst, left, t9); // Left is overwritten.
5047 xor_(overflow_dst, dst, scratch); // scratch is original left.
5048 xor_(scratch, scratch, t9); // scratch is original left.
5049 and_(overflow_dst, scratch, overflow_dst);
5050 } else {
5051 li(t9, right);
5052 dsubu(dst, left, t9);
5053 xor_(overflow_dst, dst, left);
5054 xor_(scratch, left, t9);
5055 and_(overflow_dst, scratch, overflow_dst);
5056 }
5057 }
5058 }
5059
5060
DsubuAndCheckForOverflow(Register dst,Register left,Register right,Register overflow_dst,Register scratch)5061 void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
5062 Register right,
5063 Register overflow_dst,
5064 Register scratch) {
5065 DCHECK(!dst.is(overflow_dst));
5066 DCHECK(!dst.is(scratch));
5067 DCHECK(!overflow_dst.is(scratch));
5068 DCHECK(!overflow_dst.is(left));
5069 DCHECK(!overflow_dst.is(right));
5070 DCHECK(!scratch.is(left));
5071 DCHECK(!scratch.is(right));
5072
5073   // This happens with some crankshaft code. Since Dsubu works fine if
5074   // left == right, let's not make that restriction here.
5075 if (left.is(right)) {
5076 mov(dst, zero_reg);
5077 mov(overflow_dst, zero_reg);
5078 return;
5079 }
5080
5081 if (dst.is(left)) {
5082 mov(scratch, left); // Preserve left.
5083 dsubu(dst, left, right); // Left is overwritten.
5084 xor_(overflow_dst, dst, scratch); // scratch is original left.
5085 xor_(scratch, scratch, right); // scratch is original left.
5086 and_(overflow_dst, scratch, overflow_dst);
5087 } else if (dst.is(right)) {
5088 mov(scratch, right); // Preserve right.
5089 dsubu(dst, left, right); // Right is overwritten.
5090 xor_(overflow_dst, dst, left);
5091 xor_(scratch, left, scratch); // Original right.
5092 and_(overflow_dst, scratch, overflow_dst);
5093 } else {
5094 dsubu(dst, left, right);
5095 xor_(overflow_dst, dst, left);
5096 xor_(scratch, left, right);
5097 and_(overflow_dst, scratch, overflow_dst);
5098 }
5099 }
5100
5101
DsubBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5102 void MacroAssembler::DsubBranchOvf(Register dst, Register left,
5103 const Operand& right, Label* overflow_label,
5104 Label* no_overflow_label, Register scratch) {
5105 DCHECK(overflow_label || no_overflow_label);
5106 if (right.is_reg()) {
5107 DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5108 scratch);
5109 } else {
5110 Register overflow_dst = t9;
5111 DCHECK(!dst.is(scratch));
5112 DCHECK(!dst.is(overflow_dst));
5113 DCHECK(!scratch.is(overflow_dst));
5114 DCHECK(!left.is(overflow_dst));
5115 DCHECK(!left.is(scratch));
5116 li(overflow_dst, right); // Load right.
5117 if (dst.is(left)) {
5118 mov(scratch, left); // Preserve left.
5119 Dsubu(dst, left, overflow_dst); // Left is overwritten.
5120 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5121 xor_(scratch, dst, scratch); // scratch is original left.
5122 and_(overflow_dst, scratch, overflow_dst);
5123 } else {
5124 Dsubu(dst, left, overflow_dst);
5125 xor_(scratch, left, overflow_dst);
5126 xor_(overflow_dst, dst, left);
5127 and_(overflow_dst, scratch, overflow_dst);
5128 }
5129 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5130 }
5131 }
5132
5133
DsubBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5134 void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
5135 Label* overflow_label,
5136 Label* no_overflow_label, Register scratch) {
5137 DCHECK(overflow_label || no_overflow_label);
5138 Register overflow_dst = t9;
5139 DCHECK(!dst.is(scratch));
5140 DCHECK(!dst.is(overflow_dst));
5141 DCHECK(!scratch.is(overflow_dst));
5142 DCHECK(!overflow_dst.is(left));
5143 DCHECK(!overflow_dst.is(right));
5144 DCHECK(!scratch.is(left));
5145 DCHECK(!scratch.is(right));
5146
5147   // This happens with some crankshaft code. Since Dsubu works fine if
5148   // left == right, let's not make that restriction here.
5149 if (left.is(right)) {
5150 mov(dst, zero_reg);
5151 if (no_overflow_label) {
5152 Branch(no_overflow_label);
5153 }
5154 }
5155
5156 if (dst.is(left)) {
5157 mov(scratch, left); // Preserve left.
5158 dsubu(dst, left, right); // Left is overwritten.
5159 xor_(overflow_dst, dst, scratch); // scratch is original left.
5160 xor_(scratch, scratch, right); // scratch is original left.
5161 and_(overflow_dst, scratch, overflow_dst);
5162 } else if (dst.is(right)) {
5163 mov(scratch, right); // Preserve right.
5164 dsubu(dst, left, right); // Right is overwritten.
5165 xor_(overflow_dst, dst, left);
5166 xor_(scratch, left, scratch); // Original right.
5167 and_(overflow_dst, scratch, overflow_dst);
5168 } else {
5169 dsubu(dst, left, right);
5170 xor_(overflow_dst, dst, left);
5171 xor_(scratch, left, right);
5172 and_(overflow_dst, scratch, overflow_dst);
5173 }
5174 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5175 }
5176
5177
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles,BranchDelaySlot bd)5178 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
5179 SaveFPRegsMode save_doubles,
5180 BranchDelaySlot bd) {
5181 // All parameters are on the stack. v0 has the return value after call.
5182
5183 // If the expected number of arguments of the runtime function is
5184 // constant, we check that the actual number of arguments match the
5185 // expectation.
5186 CHECK(f->nargs < 0 || f->nargs == num_arguments);
5187
5188 // TODO(1236192): Most runtime routines don't need the number of
5189 // arguments passed in because it is constant. At some point we
5190 // should remove this need and make the runtime routine entry code
5191 // smarter.
5192 PrepareCEntryArgs(num_arguments);
5193 PrepareCEntryFunction(ExternalReference(f, isolate()));
5194 CEntryStub stub(isolate(), 1, save_doubles);
5195 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5196 }
5197
5198
CallExternalReference(const ExternalReference & ext,int num_arguments,BranchDelaySlot bd)5199 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
5200 int num_arguments,
5201 BranchDelaySlot bd) {
5202 PrepareCEntryArgs(num_arguments);
5203 PrepareCEntryFunction(ext);
5204
5205 CEntryStub stub(isolate(), 1);
5206 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5207 }
5208
5209
TailCallRuntime(Runtime::FunctionId fid)5210 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
5211 const Runtime::Function* function = Runtime::FunctionForId(fid);
5212 DCHECK_EQ(1, function->result_size);
5213 if (function->nargs >= 0) {
5214 PrepareCEntryArgs(function->nargs);
5215 }
5216 JumpToExternalReference(ExternalReference(fid, isolate()));
5217 }
5218
5219
JumpToExternalReference(const ExternalReference & builtin,BranchDelaySlot bd)5220 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5221 BranchDelaySlot bd) {
5222 PrepareCEntryFunction(builtin);
5223 CEntryStub stub(isolate(), 1);
5224 Jump(stub.GetCode(),
5225 RelocInfo::CODE_TARGET,
5226 al,
5227 zero_reg,
5228 Operand(zero_reg),
5229 bd);
5230 }
5231
5232
InvokeBuiltin(int native_context_index,InvokeFlag flag,const CallWrapper & call_wrapper)5233 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
5234 const CallWrapper& call_wrapper) {
5235 // You can't call a builtin without a valid frame.
5236 DCHECK(flag == JUMP_FUNCTION || has_frame());
5237
5238 // Fake a parameter count to avoid emitting code to do the check.
5239 ParameterCount expected(0);
5240 LoadNativeContextSlot(native_context_index, a1);
5241 InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
5242 }
5243
5244
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5245 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
5246 Register scratch1, Register scratch2) {
5247 if (FLAG_native_code_counters && counter->Enabled()) {
5248 li(scratch1, Operand(value));
5249 li(scratch2, Operand(ExternalReference(counter)));
5250 sd(scratch1, MemOperand(scratch2));
5251 }
5252 }
5253
5254
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5255 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
5256 Register scratch1, Register scratch2) {
5257 DCHECK(value > 0);
5258 if (FLAG_native_code_counters && counter->Enabled()) {
5259 li(scratch2, Operand(ExternalReference(counter)));
5260 ld(scratch1, MemOperand(scratch2));
5261 Daddu(scratch1, scratch1, Operand(value));
5262 sd(scratch1, MemOperand(scratch2));
5263 }
5264 }
5265
5266
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5267 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5268 Register scratch1, Register scratch2) {
5269 DCHECK(value > 0);
5270 if (FLAG_native_code_counters && counter->Enabled()) {
5271 li(scratch2, Operand(ExternalReference(counter)));
5272 ld(scratch1, MemOperand(scratch2));
5273 Dsubu(scratch1, scratch1, Operand(value));
5274 sd(scratch1, MemOperand(scratch2));
5275 }
5276 }
5277
5278
5279 // -----------------------------------------------------------------------------
5280 // Debugging.
5281
Assert(Condition cc,BailoutReason reason,Register rs,Operand rt)5282 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
5283 Register rs, Operand rt) {
5284 if (emit_debug_code())
5285 Check(cc, reason, rs, rt);
5286 }
5287
5288
AssertFastElements(Register elements)5289 void MacroAssembler::AssertFastElements(Register elements) {
5290 if (emit_debug_code()) {
5291 DCHECK(!elements.is(at));
5292 Label ok;
5293 push(elements);
5294 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
5295 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
5296 Branch(&ok, eq, elements, Operand(at));
5297 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
5298 Branch(&ok, eq, elements, Operand(at));
5299 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
5300 Branch(&ok, eq, elements, Operand(at));
5301 Abort(kJSObjectWithFastElementsMapHasSlowElements);
5302 bind(&ok);
5303 pop(elements);
5304 }
5305 }
5306
5307
Check(Condition cc,BailoutReason reason,Register rs,Operand rt)5308 void MacroAssembler::Check(Condition cc, BailoutReason reason,
5309 Register rs, Operand rt) {
5310 Label L;
5311 Branch(&L, cc, rs, rt);
5312 Abort(reason);
5313 // Will not return here.
5314 bind(&L);
5315 }
5316
5317
Abort(BailoutReason reason)5318 void MacroAssembler::Abort(BailoutReason reason) {
5319 Label abort_start;
5320 bind(&abort_start);
5321 #ifdef DEBUG
5322 const char* msg = GetBailoutReason(reason);
5323 if (msg != NULL) {
5324 RecordComment("Abort message: ");
5325 RecordComment(msg);
5326 }
5327
5328 if (FLAG_trap_on_abort) {
5329 stop(msg);
5330 return;
5331 }
5332 #endif
5333
5334 li(a0, Operand(Smi::FromInt(reason)));
5335 push(a0);
5336 // Disable stub call restrictions to always allow calls to abort.
5337 if (!has_frame_) {
5338 // We don't actually want to generate a pile of code for this, so just
5339 // claim there is a stack frame, without generating one.
5340 FrameScope scope(this, StackFrame::NONE);
5341 CallRuntime(Runtime::kAbort, 1);
5342 } else {
5343 CallRuntime(Runtime::kAbort, 1);
5344 }
5345 // Will not return here.
5346 if (is_trampoline_pool_blocked()) {
5347 // If the calling code cares about the exact number of
5348 // instructions generated, we insert padding here to keep the size
5349 // of the Abort macro constant.
5350 // Currently in debug mode with debug_code enabled the number of
5351 // generated instructions is 10, so we use this as a maximum value.
5352 static const int kExpectedAbortInstructions = 10;
5353 int abort_instructions = InstructionsGeneratedSince(&abort_start);
5354 DCHECK(abort_instructions <= kExpectedAbortInstructions);
5355 while (abort_instructions++ < kExpectedAbortInstructions) {
5356 nop();
5357 }
5358 }
5359 }
5360
5361
LoadContext(Register dst,int context_chain_length)5362 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5363 if (context_chain_length > 0) {
5364 // Move up the chain of contexts to the context containing the slot.
5365 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5366 for (int i = 1; i < context_chain_length; i++) {
5367 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5368 }
5369 } else {
5370 // Slot is in the current function context. Move it into the
5371 // destination register in case we store into it (the write barrier
5372 // cannot be allowed to destroy the context in esi).
5373 Move(dst, cp);
5374 }
5375 }
5376
5377
LoadTransitionedArrayMapConditional(ElementsKind expected_kind,ElementsKind transitioned_kind,Register map_in_out,Register scratch,Label * no_map_match)5378 void MacroAssembler::LoadTransitionedArrayMapConditional(
5379 ElementsKind expected_kind,
5380 ElementsKind transitioned_kind,
5381 Register map_in_out,
5382 Register scratch,
5383 Label* no_map_match) {
5384 DCHECK(IsFastElementsKind(expected_kind));
5385 DCHECK(IsFastElementsKind(transitioned_kind));
5386
5387 // Check that the function's map is the same as the expected cached map.
5388 ld(scratch, NativeContextMemOperand());
5389 ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
5390 Branch(no_map_match, ne, map_in_out, Operand(at));
5391
5392 // Use the transitioned cached map.
5393 ld(map_in_out,
5394 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
5395 }
5396
5397
LoadNativeContextSlot(int index,Register dst)5398 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5399 ld(dst, NativeContextMemOperand());
5400 ld(dst, ContextMemOperand(dst, index));
5401 }
5402
5403
LoadGlobalFunctionInitialMap(Register function,Register map,Register scratch)5404 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5405 Register map,
5406 Register scratch) {
5407 // Load the initial map. The global functions all have initial maps.
5408 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5409 if (emit_debug_code()) {
5410 Label ok, fail;
5411 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
5412 Branch(&ok);
5413 bind(&fail);
5414 Abort(kGlobalFunctionsMustHaveInitialMap);
5415 bind(&ok);
5416 }
5417 }
5418
5419
StubPrologue()5420 void MacroAssembler::StubPrologue() {
5421 Push(ra, fp, cp);
5422 Push(Smi::FromInt(StackFrame::STUB));
5423 // Adjust FP to point to saved FP.
5424 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
5425 }
5426
5427
Prologue(bool code_pre_aging)5428 void MacroAssembler::Prologue(bool code_pre_aging) {
5429 PredictableCodeSizeScope predictable_code_size_scope(
5430 this, kNoCodeAgeSequenceLength);
5431 // The following three instructions must remain together and unmodified
5432 // for code aging to work properly.
5433 if (code_pre_aging) {
5434 // Pre-age the code.
5435 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
5436 nop(Assembler::CODE_AGE_MARKER_NOP);
5437 // Load the stub address to t9 and call it,
5438 // GetCodeAgeAndParity() extracts the stub address from this instruction.
5439 li(t9,
5440 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
5441 ADDRESS_LOAD);
5442 nop(); // Prevent jalr to jal optimization.
5443 jalr(t9, a0);
5444 nop(); // Branch delay slot nop.
5445 nop(); // Pad the empty space.
5446 } else {
5447 Push(ra, fp, cp, a1);
5448 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5449 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5450 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5451 // Adjust fp to point to caller's fp.
5452 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
5453 }
5454 }
5455
5456
EmitLoadTypeFeedbackVector(Register vector)5457 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
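  // Load the JSFunction from the frame, then its SharedFunctionInfo, then the
  // feedback vector hanging off the SharedFunctionInfo.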
5458 ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5459 ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
5460 ld(vector,
5461 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
5462 }
5463
5464
EnterFrame(StackFrame::Type type,bool load_constant_pool_pointer_reg)5465 void MacroAssembler::EnterFrame(StackFrame::Type type,
5466 bool load_constant_pool_pointer_reg) {
5467 // Out-of-line constant pool not implemented on mips64.
5468 UNREACHABLE();
5469 }
5470
5471
EnterFrame(StackFrame::Type type)5472 void MacroAssembler::EnterFrame(StackFrame::Type type) {
5473 daddiu(sp, sp, -5 * kPointerSize);
5474 li(t8, Operand(Smi::FromInt(type)));
5475 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
5476 sd(ra, MemOperand(sp, 4 * kPointerSize));
5477 sd(fp, MemOperand(sp, 3 * kPointerSize));
5478 sd(cp, MemOperand(sp, 2 * kPointerSize));
5479 sd(t8, MemOperand(sp, 1 * kPointerSize));
5480 sd(t9, MemOperand(sp, 0 * kPointerSize));
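  // The five freshly allocated slots now hold, from highest to lowest address:
  // ra, the caller's fp, cp, the frame-type Smi and the code object; after the
  // fp adjustment below, sp addresses the code-object slot.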
5481 // Adjust FP to point to saved FP.
5482 Daddu(fp, sp,
5483 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
5484 }
5485
5486
LeaveFrame(StackFrame::Type type)5487 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
5488 mov(sp, fp);
5489 ld(fp, MemOperand(sp, 0 * kPointerSize));
5490 ld(ra, MemOperand(sp, 1 * kPointerSize));
5491 daddiu(sp, sp, 2 * kPointerSize);
5492 }
5493
5494
EnterExitFrame(bool save_doubles,int stack_space)5495 void MacroAssembler::EnterExitFrame(bool save_doubles,
5496 int stack_space) {
5497 // Set up the frame structure on the stack.
5498 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5499 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5500 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5501
5502 // This is how the stack will look:
5503 // fp + 2 (==kCallerSPDisplacement) - old stack's end
5504 // [fp + 1 (==kCallerPCOffset)] - saved old ra
5505 // [fp + 0 (==kCallerFPOffset)] - saved old fp
5506 // [fp - 1 (==kSPOffset)] - sp of the called function
5507 // [fp - 2 (==kCodeOffset)] - CodeObject
5508 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
5509 // new stack (will contain saved ra)
5510
5511 // Save registers.
5512 daddiu(sp, sp, -4 * kPointerSize);
5513 sd(ra, MemOperand(sp, 3 * kPointerSize));
5514 sd(fp, MemOperand(sp, 2 * kPointerSize));
5515 daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
5516
5517 if (emit_debug_code()) {
5518 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5519 }
5520
5521 // Accessed from ExitFrame::code_slot.
5522 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
5523 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5524
5525 // Save the frame pointer and the context in top.
5526 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5527 sd(fp, MemOperand(t8));
5528 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5529 sd(cp, MemOperand(t8));
5530
5531 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5532 if (save_doubles) {
5533 // The stack is already 8-byte aligned, as required for sdc1 stores.
5534 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
5535 int space = kNumOfSavedRegisters * kDoubleSize;
5536 Dsubu(sp, sp, Operand(space));
5537 // Remember: we only need to save every 2nd double FPU value.
5538 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5539 FPURegister reg = FPURegister::from_code(2 * i);
5540 sdc1(reg, MemOperand(sp, i * kDoubleSize));
5541 }
5542 }
5543
5544 // Reserve space for the return address, the requested stack space and an
5545 // optional slot (used by the DirectCEntryStub to hold the return value if
5546 // a struct is returned), then align the frame for the runtime call.
5547 DCHECK(stack_space >= 0);
5548 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
5549 if (frame_alignment > 0) {
5550 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5551 And(sp, sp, Operand(-frame_alignment)); // Align stack.
5552 }
5553
5554 // Set the exit frame sp value to point just before the return address
5555 // location.
5556 daddiu(at, sp, kPointerSize);
5557 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
5558 }
5559
5560
LeaveExitFrame(bool save_doubles,Register argument_count,bool restore_context,bool do_return,bool argument_count_is_length)5561 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
5562 bool restore_context, bool do_return,
5563 bool argument_count_is_length) {
5564 // Optionally restore all double registers.
5565 if (save_doubles) {
5566 // Remember: we only need to restore every 2nd double FPU value.
5567 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
5568 Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
5569 kNumOfSavedRegisters * kDoubleSize));
5570 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5571 FPURegister reg = FPURegister::from_code(2 * i);
5572 ldc1(reg, MemOperand(t8, i * kDoubleSize));
5573 }
5574 }
5575
5576 // Clear top frame.
5577 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5578 sd(zero_reg, MemOperand(t8));
5579
5580 // Restore current context from top and clear it in debug mode.
5581 if (restore_context) {
5582 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5583 ld(cp, MemOperand(t8));
5584 }
5585 #ifdef DEBUG
5586 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5587 sd(a3, MemOperand(t8));
5588 #endif
5589
5590 // Pop the arguments, restore registers, and return.
5591 mov(sp, fp); // Respect ABI stack constraint.
5592 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5593 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5594
5595 if (argument_count.is_valid()) {
5596 if (argument_count_is_length) {
5597 daddu(sp, sp, argument_count);
5598 } else {
5599 dsll(t8, argument_count, kPointerSizeLog2);
5600 daddu(sp, sp, t8);
5601 }
5602 }
5603
5604 if (do_return) {
5605 Ret(USE_DELAY_SLOT);
5606 // If returning, the instruction in the delay slot will be the daddiu below.
5607 }
5608 daddiu(sp, sp, 2 * kPointerSize);
5609 }
5610
5611
InitializeNewString(Register string,Register length,Heap::RootListIndex map_index,Register scratch1,Register scratch2)5612 void MacroAssembler::InitializeNewString(Register string,
5613 Register length,
5614 Heap::RootListIndex map_index,
5615 Register scratch1,
5616 Register scratch2) {
5617 // dsll(scratch1, length, kSmiTagSize);
5618 dsll32(scratch1, length, 0);
5619 LoadRoot(scratch2, map_index);
5620 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
5621 li(scratch1, Operand(String::kEmptyHashField));
5622 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
5623 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
5624 }
5625
5626
ActivationFrameAlignment()5627 int MacroAssembler::ActivationFrameAlignment() {
5628 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5629 // Running on the real platform. Use the alignment as mandated by the local
5630 // environment.
5631 // Note: This will break if we ever start generating snapshots on one Mips
5632 // platform for another Mips platform with a different alignment.
5633 return base::OS::ActivationFrameAlignment();
5634 #else // V8_HOST_ARCH_MIPS
5635 // If we are using the simulator then we should always align to the expected
5636 // alignment. As the simulator is used to generate snapshots we do not know
5637 // if the target platform will need alignment, so this is controlled from a
5638 // flag.
5639 return FLAG_sim_stack_alignment;
5640 #endif // V8_HOST_ARCH_MIPS
5641 }
5642
5643
AssertStackIsAligned()5644 void MacroAssembler::AssertStackIsAligned() {
5645 if (emit_debug_code()) {
5646 const int frame_alignment = ActivationFrameAlignment();
5647 const int frame_alignment_mask = frame_alignment - 1;
5648
5649 if (frame_alignment > kPointerSize) {
5650 Label alignment_as_expected;
5651 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5652 andi(at, sp, frame_alignment_mask);
5653 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5654 // Don't use Check here, as it will call Runtime_Abort re-entering here.
5655 stop("Unexpected stack alignment");
5656 bind(&alignment_as_expected);
5657 }
5658 }
5659 }
5660
5661
JumpIfNotPowerOfTwoOrZero(Register reg,Register scratch,Label * not_power_of_two_or_zero)5662 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5663 Register reg,
5664 Register scratch,
5665 Label* not_power_of_two_or_zero) {
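  // A non-zero power of two satisfies reg > 0 and (reg & (reg - 1)) == 0.
  // The first branch filters out zero and negative values via reg - 1 < 0;
  // the second filters out values with more than one bit set.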
5666 Dsubu(scratch, reg, Operand(1));
5667 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5668 scratch, Operand(zero_reg));
5669 and_(at, scratch, reg); // In the delay slot.
5670 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
5671 }
5672
5673
SmiTagCheckOverflow(Register reg,Register overflow)5674 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5675 DCHECK(!reg.is(overflow));
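  // Tagging overflows exactly when the tagged value's sign differs from the
  // original value's sign; the xor below exposes that difference in the sign
  // bit of |overflow| for the caller to branch on.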
5676 mov(overflow, reg); // Save original value.
5677 SmiTag(reg);
5678 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
5679 }
5680
5681
SmiTagCheckOverflow(Register dst,Register src,Register overflow)5682 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5683 Register src,
5684 Register overflow) {
5685 if (dst.is(src)) {
5686 // Fall back to slower case.
5687 SmiTagCheckOverflow(dst, overflow);
5688 } else {
5689 DCHECK(!dst.is(src));
5690 DCHECK(!dst.is(overflow));
5691 DCHECK(!src.is(overflow));
5692 SmiTag(dst, src);
5693 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5694 }
5695 }
5696
5697
SmiLoadUntag(Register dst,MemOperand src)5698 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
5699 if (SmiValuesAre32Bits()) {
5700 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5701 } else {
5702 lw(dst, src);
5703 SmiUntag(dst);
5704 }
5705 }
5706
5707
SmiLoadScale(Register dst,MemOperand src,int scale)5708 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
5709 if (SmiValuesAre32Bits()) {
5710 // TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
5711 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5712 dsll(dst, dst, scale);
5713 } else {
5714 lw(dst, src);
5715 DCHECK(scale >= kSmiTagSize);
5716 sll(dst, dst, scale - kSmiTagSize);
5717 }
5718 }
5719
5720
5721 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
SmiLoadWithScale(Register d_smi,Register d_scaled,MemOperand src,int scale)5722 void MacroAssembler::SmiLoadWithScale(Register d_smi,
5723 Register d_scaled,
5724 MemOperand src,
5725 int scale) {
5726 if (SmiValuesAre32Bits()) {
5727 ld(d_smi, src);
5728 dsra(d_scaled, d_smi, kSmiShift - scale);
5729 } else {
5730 lw(d_smi, src);
5731 DCHECK(scale >= kSmiTagSize);
5732 sll(d_scaled, d_smi, scale - kSmiTagSize);
5733 }
5734 }
5735
5736
5737 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
SmiLoadUntagWithScale(Register d_int,Register d_scaled,MemOperand src,int scale)5738 void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
5739 Register d_scaled,
5740 MemOperand src,
5741 int scale) {
5742 if (SmiValuesAre32Bits()) {
5743 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
5744 dsll(d_scaled, d_int, scale);
5745 } else {
5746 lw(d_int, src);
5747 // Need both the int and the scaled int, so use two instructions.
5748 SmiUntag(d_int);
5749 sll(d_scaled, d_int, scale);
5750 }
5751 }
5752
5753
UntagAndJumpIfSmi(Register dst,Register src,Label * smi_case)5754 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5755 Register src,
5756 Label* smi_case) {
5757 // DCHECK(!dst.is(src));
5758 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5759 SmiUntag(dst, src);
5760 }
5761
5762
UntagAndJumpIfNotSmi(Register dst,Register src,Label * non_smi_case)5763 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5764 Register src,
5765 Label* non_smi_case) {
5766 // DCHECK(!dst.is(src));
5767 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5768 SmiUntag(dst, src);
5769 }
5770
JumpIfSmi(Register value,Label * smi_label,Register scratch,BranchDelaySlot bd)5771 void MacroAssembler::JumpIfSmi(Register value,
5772 Label* smi_label,
5773 Register scratch,
5774 BranchDelaySlot bd) {
5775 DCHECK_EQ(0, kSmiTag);
5776 andi(scratch, value, kSmiTagMask);
5777 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5778 }
5779
JumpIfNotSmi(Register value,Label * not_smi_label,Register scratch,BranchDelaySlot bd)5780 void MacroAssembler::JumpIfNotSmi(Register value,
5781 Label* not_smi_label,
5782 Register scratch,
5783 BranchDelaySlot bd) {
5784 DCHECK_EQ(0, kSmiTag);
5785 andi(scratch, value, kSmiTagMask);
5786 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5787 }
5788
5789
JumpIfNotBothSmi(Register reg1,Register reg2,Label * on_not_both_smi)5790 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5791 Register reg2,
5792 Label* on_not_both_smi) {
5793 STATIC_ASSERT(kSmiTag == 0);
5794 // TODO(plind): Find some better way to fix this assert issue.
5795 #if defined(__APPLE__)
5796 DCHECK_EQ(1, kSmiTagMask);
5797 #else
5798 DCHECK_EQ((int64_t)1, kSmiTagMask);
5799 #endif
5800 or_(at, reg1, reg2);
5801 JumpIfNotSmi(at, on_not_both_smi);
5802 }
5803
5804
JumpIfEitherSmi(Register reg1,Register reg2,Label * on_either_smi)5805 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5806 Register reg2,
5807 Label* on_either_smi) {
5808 STATIC_ASSERT(kSmiTag == 0);
5809 // TODO(plind): Find some better way to fix this assert issue.
5810 #if defined(__APPLE__)
5811 DCHECK_EQ(1, kSmiTagMask);
5812 #else
5813 DCHECK_EQ((int64_t)1, kSmiTagMask);
5814 #endif
5815 // Both tag bits must be 1 (i.e. neither value is a Smi) for the AND to be non-zero.
5816 and_(at, reg1, reg2);
5817 JumpIfSmi(at, on_either_smi);
5818 }
5819
5820
AssertNotSmi(Register object)5821 void MacroAssembler::AssertNotSmi(Register object) {
5822 if (emit_debug_code()) {
5823 STATIC_ASSERT(kSmiTag == 0);
5824 andi(at, object, kSmiTagMask);
5825 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5826 }
5827 }
5828
5829
AssertSmi(Register object)5830 void MacroAssembler::AssertSmi(Register object) {
5831 if (emit_debug_code()) {
5832 STATIC_ASSERT(kSmiTag == 0);
5833 andi(at, object, kSmiTagMask);
5834 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5835 }
5836 }
5837
5838
AssertString(Register object)5839 void MacroAssembler::AssertString(Register object) {
5840 if (emit_debug_code()) {
5841 STATIC_ASSERT(kSmiTag == 0);
5842 SmiTst(object, t8);
5843 Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
5844 GetObjectType(object, t8, t8);
5845 Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
5846 }
5847 }
5848
5849
AssertName(Register object)5850 void MacroAssembler::AssertName(Register object) {
5851 if (emit_debug_code()) {
5852 STATIC_ASSERT(kSmiTag == 0);
5853 SmiTst(object, t8);
5854 Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
5855 GetObjectType(object, t8, t8);
5856 Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
5857 }
5858 }
5859
5860
AssertFunction(Register object)5861 void MacroAssembler::AssertFunction(Register object) {
5862 if (emit_debug_code()) {
5863 STATIC_ASSERT(kSmiTag == 0);
5864 SmiTst(object, t8);
5865 Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
5866 GetObjectType(object, t8, t8);
5867 Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
5868 }
5869 }
5870
5871
AssertBoundFunction(Register object)5872 void MacroAssembler::AssertBoundFunction(Register object) {
5873 if (emit_debug_code()) {
5874 STATIC_ASSERT(kSmiTag == 0);
5875 SmiTst(object, t8);
5876 Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
5877 GetObjectType(object, t8, t8);
5878 Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
5879 }
5880 }
5881
5882
AssertUndefinedOrAllocationSite(Register object,Register scratch)5883 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5884 Register scratch) {
5885 if (emit_debug_code()) {
5886 Label done_checking;
5887 AssertNotSmi(object);
5888 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5889 Branch(&done_checking, eq, object, Operand(scratch));
5890 ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
5891 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5892 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
5893 bind(&done_checking);
5894 }
5895 }
5896
5897
AssertIsRoot(Register reg,Heap::RootListIndex index)5898 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5899 if (emit_debug_code()) {
5900 DCHECK(!reg.is(at));
5901 LoadRoot(at, index);
5902 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5903 }
5904 }
5905
5906
JumpIfNotHeapNumber(Register object,Register heap_number_map,Register scratch,Label * on_not_heap_number)5907 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5908 Register heap_number_map,
5909 Register scratch,
5910 Label* on_not_heap_number) {
5911 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5912 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5913 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5914 }
5915
5916
JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)5917 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5918 Register first, Register second, Register scratch1, Register scratch2,
5919 Label* failure) {
5920 // Test that both first and second are sequential one-byte strings.
5921 // Assume that they are non-smis.
5922 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5923 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5924 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5925 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5926
5927 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5928 scratch2, failure);
5929 }
5930
5931
JumpIfNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)5932 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5933 Register second,
5934 Register scratch1,
5935 Register scratch2,
5936 Label* failure) {
5937 // Check that neither is a smi.
5938 STATIC_ASSERT(kSmiTag == 0);
5939 And(scratch1, first, Operand(second));
5940 JumpIfSmi(scratch1, failure);
5941 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5942 scratch2, failure);
5943 }
5944
5945
JumpIfBothInstanceTypesAreNotSequentialOneByte(Register first,Register second,Register scratch1,Register scratch2,Label * failure)5946 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5947 Register first, Register second, Register scratch1, Register scratch2,
5948 Label* failure) {
5949 const int kFlatOneByteStringMask =
5950 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5951 const int kFlatOneByteStringTag =
5952 kStringTag | kOneByteStringTag | kSeqStringTag;
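  // A flat one-byte string is simultaneously a string, one-byte encoded and
  // sequential, so masking the instance type with the combined mask must
  // yield exactly the combined tag for both operands.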
5953 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5954 andi(scratch1, first, kFlatOneByteStringMask);
5955 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5956 andi(scratch2, second, kFlatOneByteStringMask);
5957 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5958 }
5959
5960
JumpIfInstanceTypeIsNotSequentialOneByte(Register type,Register scratch,Label * failure)5961 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5962 Register scratch,
5963 Label* failure) {
5964 const int kFlatOneByteStringMask =
5965 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5966 const int kFlatOneByteStringTag =
5967 kStringTag | kOneByteStringTag | kSeqStringTag;
5968 And(scratch, type, Operand(kFlatOneByteStringMask));
5969 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
5970 }
5971
5972
5973 static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
5974
CalculateStackPassedWords(int num_reg_arguments,int num_double_arguments)5975 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5976 int num_double_arguments) {
5977 int stack_passed_words = 0;
5978 num_reg_arguments += 2 * num_double_arguments;
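  // Each double argument is counted as two simple arguments when deciding how
  // many words spill to the stack beyond the register-passed ones.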
5979
5980 // O32: Up to four simple arguments are passed in registers a0..a3.
5981 // N64: Up to eight simple arguments are passed in registers a0..a7.
5982 if (num_reg_arguments > kRegisterPassedArguments) {
5983 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5984 }
5985 stack_passed_words += kCArgSlotCount;
5986 return stack_passed_words;
5987 }
5988
5989
EmitSeqStringSetCharCheck(Register string,Register index,Register value,Register scratch,uint32_t encoding_mask)5990 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5991 Register index,
5992 Register value,
5993 Register scratch,
5994 uint32_t encoding_mask) {
5995 Label is_object;
5996 SmiTst(string, at);
5997 Check(ne, kNonObject, at, Operand(zero_reg));
5998
5999 ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
6000 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
6001
6002 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
6003 li(scratch, Operand(encoding_mask));
6004 Check(eq, kUnexpectedStringType, at, Operand(scratch));
6005
6006 // TODO(plind): requires Smi size check code for mips32.
6007
6008 ld(at, FieldMemOperand(string, String::kLengthOffset));
6009 Check(lt, kIndexIsTooLarge, index, Operand(at));
6010
6011 DCHECK(Smi::FromInt(0) == 0);
6012 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
6013 }
6014
6015
PrepareCallCFunction(int num_reg_arguments,int num_double_arguments,Register scratch)6016 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6017 int num_double_arguments,
6018 Register scratch) {
6019 int frame_alignment = ActivationFrameAlignment();
6020
6021 // n64: Up to eight simple arguments are passed in registers a0..a7; no argument slots.
6022 // O32: Up to four simple arguments are passed in registers a0..a3.
6023 // Those four arguments must have reserved argument slots on the stack for
6024 // mips, even though those argument slots are not normally used.
6025 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
6026 // address than) the (O32) argument slots. (arg slot calculation handled by
6027 // CalculateStackPassedWords()).
6028 int stack_passed_arguments = CalculateStackPassedWords(
6029 num_reg_arguments, num_double_arguments);
6030 if (frame_alignment > kPointerSize) {
6031 // Make the stack end at the required alignment and make room for the
6032 // stack-passed arguments plus the original value of sp.
6033 mov(scratch, sp);
6034 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
6035 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6036 And(sp, sp, Operand(-frame_alignment));
6037 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
6038 } else {
6039 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6040 }
6041 }
6042
6043
PrepareCallCFunction(int num_reg_arguments,Register scratch)6044 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6045 Register scratch) {
6046 PrepareCallCFunction(num_reg_arguments, 0, scratch);
6047 }
6048
6049
CallCFunction(ExternalReference function,int num_reg_arguments,int num_double_arguments)6050 void MacroAssembler::CallCFunction(ExternalReference function,
6051 int num_reg_arguments,
6052 int num_double_arguments) {
6053 li(t8, Operand(function));
6054 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
6055 }
6056
6057
CallCFunction(Register function,int num_reg_arguments,int num_double_arguments)6058 void MacroAssembler::CallCFunction(Register function,
6059 int num_reg_arguments,
6060 int num_double_arguments) {
6061 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
6062 }
6063
6064
CallCFunction(ExternalReference function,int num_arguments)6065 void MacroAssembler::CallCFunction(ExternalReference function,
6066 int num_arguments) {
6067 CallCFunction(function, num_arguments, 0);
6068 }
6069
6070
CallCFunction(Register function,int num_arguments)6071 void MacroAssembler::CallCFunction(Register function,
6072 int num_arguments) {
6073 CallCFunction(function, num_arguments, 0);
6074 }
6075
6076
CallCFunctionHelper(Register function,int num_reg_arguments,int num_double_arguments)6077 void MacroAssembler::CallCFunctionHelper(Register function,
6078 int num_reg_arguments,
6079 int num_double_arguments) {
6080 DCHECK(has_frame());
6081 // Make sure that the stack is aligned before calling a C function unless
6082 // running in the simulator. The simulator has its own alignment check which
6083 // provides more information.
6084 // The argument slots are presumed to have been set up by
6085 // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
6086
6087 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6088 if (emit_debug_code()) {
6089 int frame_alignment = base::OS::ActivationFrameAlignment();
6090 int frame_alignment_mask = frame_alignment - 1;
6091 if (frame_alignment > kPointerSize) {
6092 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6093 Label alignment_as_expected;
6094 And(at, sp, Operand(frame_alignment_mask));
6095 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6096 // Don't use Check here, as it will call Runtime_Abort possibly
6097 // re-entering here.
6098 stop("Unexpected alignment in CallCFunction");
6099 bind(&alignment_as_expected);
6100 }
6101 }
6102 #endif // V8_HOST_ARCH_MIPS
6103
6104 // Just call directly. The function called cannot cause a GC, or
6105 // allow preemption, so the return address in the link register
6106 // stays correct.
6107
6108 if (!function.is(t9)) {
6109 mov(t9, function);
6110 function = t9;
6111 }
6112
6113 Call(function);
6114
6115 int stack_passed_arguments = CalculateStackPassedWords(
6116 num_reg_arguments, num_double_arguments);
6117
6118 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
6119 ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
6120 } else {
6121 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6122 }
6123 }
6124
6125
6126 #undef BRANCH_ARGS_CHECK
6127
6128
CheckPageFlag(Register object,Register scratch,int mask,Condition cc,Label * condition_met)6129 void MacroAssembler::CheckPageFlag(
6130 Register object,
6131 Register scratch,
6132 int mask,
6133 Condition cc,
6134 Label* condition_met) {
6135 And(scratch, object, Operand(~Page::kPageAlignmentMask));
6136 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
6137 And(scratch, scratch, Operand(mask));
6138 Branch(condition_met, cc, scratch, Operand(zero_reg));
6139 }
6140
6141
JumpIfBlack(Register object,Register scratch0,Register scratch1,Label * on_black)6142 void MacroAssembler::JumpIfBlack(Register object,
6143 Register scratch0,
6144 Register scratch1,
6145 Label* on_black) {
6146 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
6147 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6148 }
6149
6150
HasColor(Register object,Register bitmap_scratch,Register mask_scratch,Label * has_color,int first_bit,int second_bit)6151 void MacroAssembler::HasColor(Register object,
6152 Register bitmap_scratch,
6153 Register mask_scratch,
6154 Label* has_color,
6155 int first_bit,
6156 int second_bit) {
6157 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
6158 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
6159
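  // The two mark bits for |object| are adjacent in the bitmap cell. Test the
  // lower bit against first_bit and, if it matches, shift the mask up one bit
  // and test the next bit against second_bit; only a full match reaches
  // has_color.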
6160 GetMarkBits(object, bitmap_scratch, mask_scratch);
6161
6162 Label other_color;
6163 // Note that we are using two 4-byte aligned loads.
6164 LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6165 And(t8, t9, Operand(mask_scratch));
6166 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
6167 // Shift left 1 by adding.
6168 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
6169 And(t8, t9, Operand(mask_scratch));
6170 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
6171
6172 bind(&other_color);
6173 }
6174
6175
GetMarkBits(Register addr_reg,Register bitmap_reg,Register mask_reg)6176 void MacroAssembler::GetMarkBits(Register addr_reg,
6177 Register bitmap_reg,
6178 Register mask_reg) {
6179 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
6180 // addr_reg is divided into fields:
6181 // |63 page base 20|19 high 8|7 shift 3|2 0|
6182 // 'high' gives the index of the cell holding color bits for the object.
6183 // 'shift' gives the offset in the cell for this object's color.
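  // bitmap_reg <- start of the page; t8 <- the 'high' field scaled to a byte
  // offset into the bitmap, added to bitmap_reg (callers add
  // MemoryChunk::kHeaderSize when loading the cell); mask_reg <- 1 << 'shift',
  // the object's bit position within that cell.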
6184 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
6185 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
6186 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
6187 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
6188 dsll(t8, t8, Bitmap::kBytesPerCellLog2);
6189 Daddu(bitmap_reg, bitmap_reg, t8);
6190 li(t8, Operand(1));
6191 dsllv(mask_reg, t8, mask_reg);
6192 }
6193
6194
JumpIfWhite(Register value,Register bitmap_scratch,Register mask_scratch,Register load_scratch,Label * value_is_white)6195 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
6196 Register mask_scratch, Register load_scratch,
6197 Label* value_is_white) {
6198 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
6199 GetMarkBits(value, bitmap_scratch, mask_scratch);
6200
6201 // If the value is black or grey we don't need to do anything.
6202 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
6203 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6204 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
6205 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
6206
6207 // Since both black and grey have a 1 in the first position and white does
6208 // not have a 1 there we only need to check one bit.
6209 // Note that we are using a 4-byte aligned 8-byte load.
6210 if (emit_debug_code()) {
6211 LoadWordPair(load_scratch,
6212 MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6213 } else {
6214 lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6215 }
6216 And(t8, mask_scratch, load_scratch);
6217 Branch(value_is_white, eq, t8, Operand(zero_reg));
6218 }
6219
6220
LoadInstanceDescriptors(Register map,Register descriptors)6221 void MacroAssembler::LoadInstanceDescriptors(Register map,
6222 Register descriptors) {
6223 ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
6224 }
6225
6226
NumberOfOwnDescriptors(Register dst,Register map)6227 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
6228 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
6229 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
6230 }
6231
6232
EnumLength(Register dst,Register map)6233 void MacroAssembler::EnumLength(Register dst, Register map) {
6234 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
6235 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
6236 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
6237 SmiTag(dst);
6238 }
6239
6240
LoadAccessor(Register dst,Register holder,int accessor_index,AccessorComponent accessor)6241 void MacroAssembler::LoadAccessor(Register dst, Register holder,
6242 int accessor_index,
6243 AccessorComponent accessor) {
6244 ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
6245 LoadInstanceDescriptors(dst, dst);
6246 ld(dst,
6247 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
6248 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
6249 : AccessorPair::kSetterOffset;
6250 ld(dst, FieldMemOperand(dst, offset));
6251 }
6252
6253
CheckEnumCache(Register null_value,Label * call_runtime)6254 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
6255 Register empty_fixed_array_value = a6;
6256 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
6257 Label next, start;
6258 mov(a2, a0);
6259
6260 // Check if the enum length field is properly initialized, indicating that
6261 // there is an enum cache.
6262 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6263
6264 EnumLength(a3, a1);
6265 Branch(
6266 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
6267
6268 jmp(&start);
6269
6270 bind(&next);
6271 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6272
6273 // For all objects but the receiver, check that the cache is empty.
6274 EnumLength(a3, a1);
6275 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
6276
6277 bind(&start);
6278
6279 // Check that there are no elements. Register a2 contains the current JS
6280 // object we've reached through the prototype chain.
6281 Label no_elements;
6282 ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
6283 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
6284
6285 // Second chance, the object may be using the empty slow element dictionary.
6286 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
6287 Branch(call_runtime, ne, a2, Operand(at));
6288
6289 bind(&no_elements);
6290 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
6291 Branch(&next, ne, a2, Operand(null_value));
6292 }
6293
6294
ClampUint8(Register output_reg,Register input_reg)6295 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
6296 DCHECK(!output_reg.is(input_reg));
6297 Label done;
6298 li(output_reg, Operand(255));
6299 // Normal branch: nop in delay slot.
6300 Branch(&done, gt, input_reg, Operand(output_reg));
6301 // Use delay slot in this branch.
6302 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
6303 mov(output_reg, zero_reg); // In delay slot.
6304 mov(output_reg, input_reg); // Value is in range 0..255.
6305 bind(&done);
6306 }
6307
6308
ClampDoubleToUint8(Register result_reg,DoubleRegister input_reg,DoubleRegister temp_double_reg)6309 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
6310 DoubleRegister input_reg,
6311 DoubleRegister temp_double_reg) {
6312 Label above_zero;
6313 Label done;
6314 Label in_bounds;
6315
6316 Move(temp_double_reg, 0.0);
6317 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
6318
6319 // Double value is less than zero (including -Infinity) or NaN; return 0.
6320 mov(result_reg, zero_reg);
6321 Branch(&done);
6322
6323 // Double value is >= 255, return 255.
6324 bind(&above_zero);
6325 Move(temp_double_reg, 255.0);
6326 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
6327 li(result_reg, Operand(255));
6328 Branch(&done);
6329
6330 // In 0-255 range, round and truncate.
6331 bind(&in_bounds);
6332 cvt_w_d(temp_double_reg, input_reg);
6333 mfc1(result_reg, temp_double_reg);
6334 bind(&done);
6335 }
6336
6337
TestJSArrayForAllocationMemento(Register receiver_reg,Register scratch_reg,Label * no_memento_found,Condition cond,Label * allocation_memento_present)6338 void MacroAssembler::TestJSArrayForAllocationMemento(
6339 Register receiver_reg,
6340 Register scratch_reg,
6341 Label* no_memento_found,
6342 Condition cond,
6343 Label* allocation_memento_present) {
6344 ExternalReference new_space_start =
6345 ExternalReference::new_space_start(isolate());
6346 ExternalReference new_space_allocation_top =
6347 ExternalReference::new_space_allocation_top_address(isolate());
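  // A memento, if present, sits directly after the JSArray in new space.
  // Compute the address just past the would-be memento, bail out to
  // no_memento_found if it lies outside [new_space_start, allocation top],
  // and otherwise compare the candidate's map slot against the allocation
  // memento map.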
6348 Daddu(scratch_reg, receiver_reg,
6349 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
6350 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
6351 li(at, Operand(new_space_allocation_top));
6352 ld(at, MemOperand(at));
6353 Branch(no_memento_found, gt, scratch_reg, Operand(at));
6354 ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
6355 if (allocation_memento_present) {
6356 Branch(allocation_memento_present, cond, scratch_reg,
6357 Operand(isolate()->factory()->allocation_memento_map()));
6358 }
6359 }
6360
6361
GetRegisterThatIsNotOneOf(Register reg1,Register reg2,Register reg3,Register reg4,Register reg5,Register reg6)6362 Register GetRegisterThatIsNotOneOf(Register reg1,
6363 Register reg2,
6364 Register reg3,
6365 Register reg4,
6366 Register reg5,
6367 Register reg6) {
6368 RegList regs = 0;
6369 if (reg1.is_valid()) regs |= reg1.bit();
6370 if (reg2.is_valid()) regs |= reg2.bit();
6371 if (reg3.is_valid()) regs |= reg3.bit();
6372 if (reg4.is_valid()) regs |= reg4.bit();
6373 if (reg5.is_valid()) regs |= reg5.bit();
6374 if (reg6.is_valid()) regs |= reg6.bit();
6375
6376 const RegisterConfiguration* config =
6377 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
6378 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
6379 int code = config->GetAllocatableGeneralCode(i);
6380 Register candidate = Register::from_code(code);
6381 if (regs & candidate.bit()) continue;
6382 return candidate;
6383 }
6384 UNREACHABLE();
6385 return no_reg;
6386 }
6387
6388
JumpIfDictionaryInPrototypeChain(Register object,Register scratch0,Register scratch1,Label * found)6389 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
6390 Register object,
6391 Register scratch0,
6392 Register scratch1,
6393 Label* found) {
6394 DCHECK(!scratch1.is(scratch0));
6395 Factory* factory = isolate()->factory();
6396 Register current = scratch0;
6397 Label loop_again, end;
6398
6399 // scratch0 tracks the current object as we walk up the prototype chain.
6400 Move(current, object);
6401 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
6402 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
6403 Branch(&end, eq, current, Operand(factory->null_value()));
6404
6405 // Loop based on the map going up the prototype chain.
6406 bind(&loop_again);
6407 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
6408 lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
6409 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
6410 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
6411 Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
6412 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
6413 DecodeField<Map::ElementsKindBits>(scratch1);
6414 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
6415 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
6416 Branch(&loop_again, ne, current, Operand(factory->null_value()));
6417
6418 bind(&end);
6419 }
6420
6421
AreAliased(Register reg1,Register reg2,Register reg3,Register reg4,Register reg5,Register reg6,Register reg7,Register reg8,Register reg9,Register reg10)6422 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
6423 Register reg5, Register reg6, Register reg7, Register reg8,
6424 Register reg9, Register reg10) {
6425 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
6426 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
6427 reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
6428 reg10.is_valid();
6429
6430 RegList regs = 0;
6431 if (reg1.is_valid()) regs |= reg1.bit();
6432 if (reg2.is_valid()) regs |= reg2.bit();
6433 if (reg3.is_valid()) regs |= reg3.bit();
6434 if (reg4.is_valid()) regs |= reg4.bit();
6435 if (reg5.is_valid()) regs |= reg5.bit();
6436 if (reg6.is_valid()) regs |= reg6.bit();
6437 if (reg7.is_valid()) regs |= reg7.bit();
6438 if (reg8.is_valid()) regs |= reg8.bit();
6439 if (reg9.is_valid()) regs |= reg9.bit();
6440 if (reg10.is_valid()) regs |= reg10.bit();
6441 int n_of_non_aliasing_regs = NumRegs(regs);
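  // If any register was passed more than once, the combined RegList has fewer
  // distinct bits than there are valid registers.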
6442
6443 return n_of_valid_regs != n_of_non_aliasing_regs;
6444 }
6445
6446
CodePatcher(Isolate * isolate,byte * address,int instructions,FlushICache flush_cache)6447 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
6448 FlushICache flush_cache)
6449 : address_(address),
6450 size_(instructions * Assembler::kInstrSize),
6451 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
6452 flush_cache_(flush_cache) {
6453 // Create a new macro assembler pointing to the address of the code to patch.
6454 // The size is adjusted with kGap in order for the assembler to generate size
6455 // bytes of instructions without failing with buffer size constraints.
6456 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6457 }
6458
6459
~CodePatcher()6460 CodePatcher::~CodePatcher() {
6461 // Indicate that code has changed.
6462 if (flush_cache_ == FLUSH) {
6463 Assembler::FlushICache(masm_.isolate(), address_, size_);
6464 }
6465 // Check that the code was patched as expected.
6466 DCHECK(masm_.pc_ == address_ + size_);
6467 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6468 }
6469
6470
Emit(Instr instr)6471 void CodePatcher::Emit(Instr instr) {
6472 masm()->emit(instr);
6473 }
6474
6475
Emit(Address addr)6476 void CodePatcher::Emit(Address addr) {
6477 // masm()->emit(reinterpret_cast<Instr>(addr));
6478 }
6479
6480
ChangeBranchCondition(Instr current_instr,uint32_t new_opcode)6481 void CodePatcher::ChangeBranchCondition(Instr current_instr,
6482 uint32_t new_opcode) {
6483 current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
6484 masm_.emit(current_instr);
6485 }
6486
6487
TruncatingDiv(Register result,Register dividend,int32_t divisor)6488 void MacroAssembler::TruncatingDiv(Register result,
6489 Register dividend,
6490 int32_t divisor) {
6491 DCHECK(!dividend.is(result));
6492 DCHECK(!dividend.is(at));
6493 DCHECK(!result.is(at));
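  // Signed division by a constant via a precomputed magic multiplier: take the
  // high 32 bits of dividend * multiplier, apply the standard correction when
  // the multiplier's top bit is set, shift arithmetically by mag.shift, and
  // finally add the dividend's sign bit so the quotient is truncated toward
  // zero.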
6494 base::MagicNumbersForDivision<uint32_t> mag =
6495 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
6496 li(at, Operand(static_cast<int32_t>(mag.multiplier)));
6497 Mulh(result, dividend, Operand(at));
6498 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6499 if (divisor > 0 && neg) {
6500 Addu(result, result, Operand(dividend));
6501 }
6502 if (divisor < 0 && !neg && mag.multiplier > 0) {
6503 Subu(result, result, Operand(dividend));
6504 }
6505 if (mag.shift > 0) sra(result, result, mag.shift);
6506 srl(at, dividend, 31);
6507 Addu(result, result, Operand(at));
6508 }
6509
6510
6511 } // namespace internal
6512 } // namespace v8
6513
6514 #endif // V8_TARGET_ARCH_MIPS64
6515