1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 // The original source code covered by the above license above has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34
35 #include "src/mips/assembler-mips.h"
36
37 #if V8_TARGET_ARCH_MIPS
38
39 #include "src/base/bits.h"
40 #include "src/base/cpu.h"
41 #include "src/mips/assembler-mips-inl.h"
42
43 namespace v8 {
44 namespace internal {
45
46 // Get the CPU features enabled by the build. For cross compilation the
47 // preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
48 // can be defined to enable FPU instructions when building the
49 // snapshot.
// Returns the feature bits that the build configuration guarantees, i.e.
// features the C++ compiler itself is already allowed to emit.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned features = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  features |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  features |= 1u << FPU;
#endif

  return features;
}
65
66
// Determines the supported_ feature set: statically implied features always,
// plus runtime-probed features when not building a cross-compile snapshot.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
  // FP64 mode is implied on r6.
  supported_ |= 1u << FP64FPU;
#endif
#if defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(FPU_MODE_FPXX)
  // In FPXX builds the FR (fp64) mode is only known at runtime.
  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#if defined(_MIPS_ARCH_MIPS32RX)
  // Map the probed architecture revision onto the MIPSr* feature bits.
  if (cpu.architecture() == 6) {
    supported_ |= 1u << MIPSr6;
  } else if (cpu.architecture() == 2) {
    supported_ |= 1u << MIPSr1;
    supported_ |= 1u << MIPSr2;
  } else {
    supported_ |= 1u << MIPSr1;
  }
#endif
#endif
}
106
107
PrintTarget()108 void CpuFeatures::PrintTarget() { }
PrintFeatures()109 void CpuFeatures::PrintFeatures() { }
110
111
ToNumber(Register reg)112 int ToNumber(Register reg) {
113 DCHECK(reg.is_valid());
114 const int kNumbers[] = {
115 0, // zero_reg
116 1, // at
117 2, // v0
118 3, // v1
119 4, // a0
120 5, // a1
121 6, // a2
122 7, // a3
123 8, // t0
124 9, // t1
125 10, // t2
126 11, // t3
127 12, // t4
128 13, // t5
129 14, // t6
130 15, // t7
131 16, // s0
132 17, // s1
133 18, // s2
134 19, // s3
135 20, // s4
136 21, // s5
137 22, // s6
138 23, // s7
139 24, // t8
140 25, // t9
141 26, // k0
142 27, // k1
143 28, // gp
144 29, // sp
145 30, // fp
146 31, // ra
147 };
148 return kNumbers[reg.code()];
149 }
150
151
ToRegister(int num)152 Register ToRegister(int num) {
153 DCHECK(num >= 0 && num < kNumRegisters);
154 const Register kRegisters[] = {
155 zero_reg,
156 at,
157 v0, v1,
158 a0, a1, a2, a3,
159 t0, t1, t2, t3, t4, t5, t6, t7,
160 s0, s1, s2, s3, s4, s5, s6, s7,
161 t8, t9,
162 k0, k1,
163 gp,
164 sp,
165 fp,
166 ra
167 };
168 return kRegisters[num];
169 }
170
171
172 // -----------------------------------------------------------------------------
173 // Implementation of RelocInfo.
174
// Relocation modes that must be re-applied when generated code is moved:
// code targets plus (encoded) internal references.
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
178
179
IsCodedSpecially()180 bool RelocInfo::IsCodedSpecially() {
181 // The deserializer needs to know whether a pointer is specially coded. Being
182 // specially coded on MIPS means that it is a lui/ori instruction, and that is
183 // always the case inside code objects.
184 return true;
185 }
186
187
IsInConstantPool()188 bool RelocInfo::IsInConstantPool() {
189 return false;
190 }
191
192
193 // -----------------------------------------------------------------------------
194 // Implementation of Operand and MemOperand.
195 // See assembler-mips-inl.h for inlined constructors.
196
Operand(Handle<Object> handle)197 Operand::Operand(Handle<Object> handle) {
198 AllowDeferredHandleDereference using_raw_address;
199 rm_ = no_reg;
200 // Verify all Objects referred by code are NOT in new space.
201 Object* obj = *handle;
202 if (obj->IsHeapObject()) {
203 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
204 imm32_ = reinterpret_cast<intptr_t>(handle.location());
205 rmode_ = RelocInfo::EMBEDDED_OBJECT;
206 } else {
207 // No relocation needed.
208 imm32_ = reinterpret_cast<intptr_t>(obj);
209 rmode_ = RelocInfo::NONE32;
210 }
211 }
212
213
// Register-plus-offset addressing: [rm + offset].
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}
217
218
// Computed addressing: [rm + unit * multiplier + offset_addend].
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
223
224
225 // -----------------------------------------------------------------------------
226 // Specific instructions, constants, and masks.
227
// Pre-encoded instruction patterns and masks used to recognize and patch
// push/pop/lw/sw sequences in generated code.
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
                              (Register::kCode_sp << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
                               (Register::kCode_sp << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern =
    LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern =
    SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
// Opcode + rs field; everything above the rt field and the 16-bit immediate.
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
261
262
// Constructs an assembler writing into |buffer| (owned by AssemblerBase when
// |buffer| is NULL). Instructions grow up from the start of the buffer while
// relocation info grows down from its end.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  // With forced long branches every target is reachable, so trampolines are
  // considered already "emitted" and will never be generated.
  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}
285
286
// Finalizes the instruction stream and fills in |desc| so the caller can
// create a Code object from the buffer.
void Assembler::GetCode(CodeDesc* desc) {
  // A trailing compact branch needs its forbidden-slot nop before the
  // buffer is handed out.
  if (IsPrevInstrCompactBranch()) {
    nop();
    ClearCompactBranchState();
  }
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
  desc->constant_pool_size = 0;
}
301
302
Align(int m)303 void Assembler::Align(int m) {
304 DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
305 if (IsPrevInstrCompactBranch()) {
306 nop();
307 ClearCompactBranchState();
308 }
309 while ((pc_offset() & (m - 1)) != 0) {
310 nop();
311 }
312 }
313
314
CodeTargetAlign()315 void Assembler::CodeTargetAlign() {
316 // No advantage to aligning branch/call targets to more than
317 // single instruction, that I am aware of.
318 Align(4);
319 }
320
321
GetRtReg(Instr instr)322 Register Assembler::GetRtReg(Instr instr) {
323 Register rt;
324 rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
325 return rt;
326 }
327
328
GetRsReg(Instr instr)329 Register Assembler::GetRsReg(Instr instr) {
330 Register rs;
331 rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
332 return rs;
333 }
334
335
GetRdReg(Instr instr)336 Register Assembler::GetRdReg(Instr instr) {
337 Register rd;
338 rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
339 return rd;
340 }
341
342
GetRt(Instr instr)343 uint32_t Assembler::GetRt(Instr instr) {
344 return (instr & kRtFieldMask) >> kRtShift;
345 }
346
347
GetRtField(Instr instr)348 uint32_t Assembler::GetRtField(Instr instr) {
349 return instr & kRtFieldMask;
350 }
351
352
GetRs(Instr instr)353 uint32_t Assembler::GetRs(Instr instr) {
354 return (instr & kRsFieldMask) >> kRsShift;
355 }
356
357
GetRsField(Instr instr)358 uint32_t Assembler::GetRsField(Instr instr) {
359 return instr & kRsFieldMask;
360 }
361
362
GetRd(Instr instr)363 uint32_t Assembler::GetRd(Instr instr) {
364 return (instr & kRdFieldMask) >> kRdShift;
365 }
366
367
GetRdField(Instr instr)368 uint32_t Assembler::GetRdField(Instr instr) {
369 return instr & kRdFieldMask;
370 }
371
372
GetSa(Instr instr)373 uint32_t Assembler::GetSa(Instr instr) {
374 return (instr & kSaFieldMask) >> kSaShift;
375 }
376
377
GetSaField(Instr instr)378 uint32_t Assembler::GetSaField(Instr instr) {
379 return instr & kSaFieldMask;
380 }
381
382
GetOpcodeField(Instr instr)383 uint32_t Assembler::GetOpcodeField(Instr instr) {
384 return instr & kOpcodeMask;
385 }
386
387
GetFunction(Instr instr)388 uint32_t Assembler::GetFunction(Instr instr) {
389 return (instr & kFunctionFieldMask) >> kFunctionShift;
390 }
391
392
GetFunctionField(Instr instr)393 uint32_t Assembler::GetFunctionField(Instr instr) {
394 return instr & kFunctionFieldMask;
395 }
396
397
GetImmediate16(Instr instr)398 uint32_t Assembler::GetImmediate16(Instr instr) {
399 return instr & kImm16Mask;
400 }
401
402
GetLabelConst(Instr instr)403 uint32_t Assembler::GetLabelConst(Instr instr) {
404 return instr & ~kImm16Mask;
405 }
406
407
IsPop(Instr instr)408 bool Assembler::IsPop(Instr instr) {
409 return (instr & ~kRtMask) == kPopRegPattern;
410 }
411
412
IsPush(Instr instr)413 bool Assembler::IsPush(Instr instr) {
414 return (instr & ~kRtMask) == kPushRegPattern;
415 }
416
417
IsSwRegFpOffset(Instr instr)418 bool Assembler::IsSwRegFpOffset(Instr instr) {
419 return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
420 }
421
422
IsLwRegFpOffset(Instr instr)423 bool Assembler::IsLwRegFpOffset(Instr instr) {
424 return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
425 }
426
427
IsSwRegFpNegOffset(Instr instr)428 bool Assembler::IsSwRegFpNegOffset(Instr instr) {
429 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
430 kSwRegFpNegOffsetPattern);
431 }
432
433
IsLwRegFpNegOffset(Instr instr)434 bool Assembler::IsLwRegFpNegOffset(Instr instr) {
435 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
436 kLwRegFpNegOffsetPattern);
437 }
438
439
440 // Labels refer to positions in the (to be) generated code.
441 // There are bound, linked, and unused labels.
442 //
443 // Bound labels refer to known positions in the already
444 // generated code. pos() is the position the label refers to.
445 //
446 // Linked labels refer to unknown positions in the code
447 // to be generated; pos() is the position of the last
448 // instruction using the label.
449
450 // The link chain is terminated by a value in the instruction of -1,
451 // which is an otherwise illegal value (branch -1 is inf loop).
452 // The instruction 16-bit offset field addresses 32-bit words, but in
453 // code is conv to an 18-bit value addressing bytes, hence the -4 value.
454
// Sentinel terminating a label link chain; -4 because the 16-bit branch
// offset addresses words but is converted to an 18-bit byte offset.
const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
458
459
// Returns true if |instr| is any conditional or unconditional branch
// (including r6 compact branches), but not a jump (see IsJump).
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
  if (!isBranch && IsMipsArchVariant(kMips32r6)) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}
483
484
IsBc(Instr instr)485 bool Assembler::IsBc(Instr instr) {
486 uint32_t opcode = GetOpcodeField(instr);
487 // Checks if the instruction is a BC or BALC.
488 return opcode == BC || opcode == BALC;
489 }
490
491
IsBzc(Instr instr)492 bool Assembler::IsBzc(Instr instr) {
493 uint32_t opcode = GetOpcodeField(instr);
494 // Checks if the instruction is BEQZC or BNEZC.
495 return (opcode == POP66 && GetRsField(instr) != 0) ||
496 (opcode == POP76 && GetRsField(instr) != 0);
497 }
498
499
IsEmittedConstant(Instr instr)500 bool Assembler::IsEmittedConstant(Instr instr) {
501 uint32_t label_constant = GetLabelConst(instr);
502 return label_constant == 0; // Emitted label const in reg-exp engine.
503 }
504
505
IsBeq(Instr instr)506 bool Assembler::IsBeq(Instr instr) {
507 return GetOpcodeField(instr) == BEQ;
508 }
509
510
IsBne(Instr instr)511 bool Assembler::IsBne(Instr instr) {
512 return GetOpcodeField(instr) == BNE;
513 }
514
515
IsBeqzc(Instr instr)516 bool Assembler::IsBeqzc(Instr instr) {
517 uint32_t opcode = GetOpcodeField(instr);
518 return opcode == POP66 && GetRsField(instr) != 0;
519 }
520
521
IsBnezc(Instr instr)522 bool Assembler::IsBnezc(Instr instr) {
523 uint32_t opcode = GetOpcodeField(instr);
524 return opcode == POP76 && GetRsField(instr) != 0;
525 }
526
527
IsBeqc(Instr instr)528 bool Assembler::IsBeqc(Instr instr) {
529 uint32_t opcode = GetOpcodeField(instr);
530 uint32_t rs = GetRsField(instr);
531 uint32_t rt = GetRtField(instr);
532 return opcode == POP10 && rs != 0 && rs < rt; // && rt != 0
533 }
534
535
IsBnec(Instr instr)536 bool Assembler::IsBnec(Instr instr) {
537 uint32_t opcode = GetOpcodeField(instr);
538 uint32_t rs = GetRsField(instr);
539 uint32_t rt = GetRtField(instr);
540 return opcode == POP30 && rs != 0 && rs < rt; // && rt != 0
541 }
542
543
IsJump(Instr instr)544 bool Assembler::IsJump(Instr instr) {
545 uint32_t opcode = GetOpcodeField(instr);
546 uint32_t rt_field = GetRtField(instr);
547 uint32_t rd_field = GetRdField(instr);
548 uint32_t function_field = GetFunctionField(instr);
549 // Checks if the instruction is a jump.
550 return opcode == J || opcode == JAL ||
551 (opcode == SPECIAL && rt_field == 0 &&
552 ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
553 }
554
555
IsJ(Instr instr)556 bool Assembler::IsJ(Instr instr) {
557 uint32_t opcode = GetOpcodeField(instr);
558 // Checks if the instruction is a jump.
559 return opcode == J;
560 }
561
562
IsJal(Instr instr)563 bool Assembler::IsJal(Instr instr) {
564 return GetOpcodeField(instr) == JAL;
565 }
566
567
IsJr(Instr instr)568 bool Assembler::IsJr(Instr instr) {
569 if (!IsMipsArchVariant(kMips32r6)) {
570 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
571 } else {
572 return GetOpcodeField(instr) == SPECIAL &&
573 GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
574 }
575 }
576
577
IsJalr(Instr instr)578 bool Assembler::IsJalr(Instr instr) {
579 return GetOpcodeField(instr) == SPECIAL &&
580 GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
581 }
582
583
IsLui(Instr instr)584 bool Assembler::IsLui(Instr instr) {
585 uint32_t opcode = GetOpcodeField(instr);
586 // Checks if the instruction is a load upper immediate.
587 return opcode == LUI;
588 }
589
590
IsOri(Instr instr)591 bool Assembler::IsOri(Instr instr) {
592 uint32_t opcode = GetOpcodeField(instr);
593 // Checks if the instruction is a load upper immediate.
594 return opcode == ORI;
595 }
596
597
IsNop(Instr instr,unsigned int type)598 bool Assembler::IsNop(Instr instr, unsigned int type) {
599 // See Assembler::nop(type).
600 DCHECK(type < 32);
601 uint32_t opcode = GetOpcodeField(instr);
602 uint32_t function = GetFunctionField(instr);
603 uint32_t rt = GetRt(instr);
604 uint32_t rd = GetRd(instr);
605 uint32_t sa = GetSa(instr);
606
607 // Traditional mips nop == sll(zero_reg, zero_reg, 0)
608 // When marking non-zero type, use sll(zero_reg, at, type)
609 // to avoid use of mips ssnop and ehb special encodings
610 // of the sll instruction.
611
612 Register nop_rt_reg = (type == 0) ? zero_reg : at;
613 bool ret = (opcode == SPECIAL && function == SLL &&
614 rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
615 rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
616 sa == type);
617
618 return ret;
619 }
620
621
GetBranchOffset(Instr instr)622 int32_t Assembler::GetBranchOffset(Instr instr) {
623 DCHECK(IsBranch(instr));
624 return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
625 }
626
627
IsLw(Instr instr)628 bool Assembler::IsLw(Instr instr) {
629 return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
630 }
631
632
GetLwOffset(Instr instr)633 int16_t Assembler::GetLwOffset(Instr instr) {
634 DCHECK(IsLw(instr));
635 return ((instr & kImm16Mask));
636 }
637
638
SetLwOffset(Instr instr,int16_t offset)639 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
640 DCHECK(IsLw(instr));
641
642 // We actually create a new lw instruction based on the original one.
643 Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
644 | (offset & kImm16Mask);
645
646 return temp_instr;
647 }
648
649
IsSw(Instr instr)650 bool Assembler::IsSw(Instr instr) {
651 return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
652 }
653
654
SetSwOffset(Instr instr,int16_t offset)655 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
656 DCHECK(IsSw(instr));
657 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
658 }
659
660
IsAddImmediate(Instr instr)661 bool Assembler::IsAddImmediate(Instr instr) {
662 return ((instr & kOpcodeMask) == ADDIU);
663 }
664
665
SetAddImmediateOffset(Instr instr,int16_t offset)666 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
667 DCHECK(IsAddImmediate(instr));
668 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
669 }
670
671
IsAndImmediate(Instr instr)672 bool Assembler::IsAndImmediate(Instr instr) {
673 return GetOpcodeField(instr) == ANDI;
674 }
675
676
OffsetSizeInBits(Instr instr)677 static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
678 if (IsMipsArchVariant(kMips32r6)) {
679 if (Assembler::IsBc(instr)) {
680 return Assembler::OffsetSize::kOffset26;
681 } else if (Assembler::IsBzc(instr)) {
682 return Assembler::OffsetSize::kOffset21;
683 }
684 }
685 return Assembler::OffsetSize::kOffset16;
686 }
687
688
// Decodes the (sign-extended) branch offset embedded in |instr| and returns
// the absolute target position for a branch located at |pos|, or kEndOfChain
// if the offset is the link-chain terminator.
static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  // Shifting left then right sign-extends the offset while also converting
  // from words to bytes (right shift is two less than the left shift).
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
705
706
target_at(int pos,bool is_internal)707 int Assembler::target_at(int pos, bool is_internal) {
708 Instr instr = instr_at(pos);
709 if (is_internal) {
710 if (instr == 0) {
711 return kEndOfChain;
712 } else {
713 int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
714 int delta = static_cast<int>(instr_address - instr);
715 DCHECK(pos > delta);
716 return pos - delta;
717 }
718 }
719 if ((instr & ~kImm16Mask) == 0) {
720 // Emitted label constant, not part of a branch.
721 if (instr == 0) {
722 return kEndOfChain;
723 } else {
724 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
725 return (imm18 + pos);
726 }
727 }
728 // Check we have a branch or jump instruction.
729 DCHECK(IsBranch(instr) || IsLui(instr));
730 if (IsBranch(instr)) {
731 return AddBranchOffset(pos, instr);
732 } else {
733 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
734 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
735 DCHECK(IsOri(instr_ori));
736 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
737 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
738
739 if (imm == kEndOfJumpChain) {
740 // EndOfChain sentinel is returned directly, not relative to pc or pos.
741 return kEndOfChain;
742 } else {
743 uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
744 int32_t delta = instr_address - imm;
745 DCHECK(pos > delta);
746 return pos - delta;
747 }
748 }
749 return 0;
750 }
751
752
// Re-encodes |instr| (a branch at position |pos|) so that it targets
// |target_pos|, preserving all non-offset fields.
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  // Branch offsets are relative to the delay-slot pc and measured in words.
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK((imm & 3) == 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
766
767
// Patches the link/branch at |pos| so that it refers to |target_pos|.
// Handles internal references (absolute address), emitted label constants,
// branches (pc-relative offset) and lui/ori pairs (absolute address).
void Assembler::target_at_put(int32_t pos, int32_t target_pos,
                              bool is_internal) {
  Instr instr = instr_at(pos);

  if (is_internal) {
    // Store the absolute address of the target directly.
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    instr_at_put(pos, imm);
    return;
  }
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else {
    // lui/ori pair: split the absolute target address into its high and low
    // 16-bit halves.
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm & kHiMask) >> kLuiShift));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
  }
}
805
806
// Debug helper: prints the state of label |L|, walking its link chain when
// the label is still unbound. The label itself is not modified (a copy is
// iterated).
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        // Emitted label constant.
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      // Internal references need the is_internal flag to be decoded.
      next(&l, internal_reference_positions_.find(l.pos()) !=
                   internal_reference_positions_.end());
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
830
831
// Binds label |L| to position |pos| and patches every instruction on its
// link chain to target that position, routing out-of-range branches through
// a trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  // The label is being resolved, so its reserved trampoline slot (and the
  // associated buffer-check margin) can be released.
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  // Walk and patch the whole link chain.
  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    is_internal = internal_reference_positions_.find(fixup_pos) !=
                  internal_reference_positions_.end();
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          // Target is out of direct reach: branch to a trampoline slot that
          // is in range, and let the trampoline jump to the real target.
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK(trampoline_pos != kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
          dist = pos - fixup_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        // Non-branch link (lui/ori pair or label constant).
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
877
878
// Binds |L| to the current end of the instruction stream.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}
883
884
next(Label * L,bool is_internal)885 void Assembler::next(Label* L, bool is_internal) {
886 DCHECK(L->is_linked());
887 int link = target_at(L->pos(), is_internal);
888 if (link == kEndOfChain) {
889 L->Unuse();
890 } else {
891 DCHECK(link >= 0);
892 L->link_to(link);
893 }
894 }
895
896
is_near(Label * L)897 bool Assembler::is_near(Label* L) {
898 DCHECK(L->is_bound());
899 return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
900 }
901
902
is_near(Label * L,OffsetSize bits)903 bool Assembler::is_near(Label* L, OffsetSize bits) {
904 if (L == nullptr || !L->is_bound()) return true;
905 return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
906 }
907
908
is_near_branch(Label * L)909 bool Assembler::is_near_branch(Label* L) {
910 DCHECK(L->is_bound());
911 return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
912 }
913
914
// Returns the maximum forward byte reach of |instr|'s branch offset field.
int Assembler::BranchOffset(Instr instr) {
  // At pre-R6 and for other R6 branches the offset is 16 bits.
  int bits = OffsetSize::kOffset16;

  if (IsMipsArchVariant(kMips32r6)) {
    uint32_t opcode = GetOpcodeField(instr);
    switch (opcode) {
      // Checks BC or BALC.
      case BC:
      case BALC:
        bits = OffsetSize::kOffset26;
        break;

      // Checks BEQZC or BNEZC.
      case POP66:
      case POP76:
        // rs == 0 under these opcodes is a different (non-branch) encoding.
        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
        break;
      default:
        break;
    }
  }

  // Word offset of |bits| bits converted to a max positive byte offset.
  return (1 << (bits + 2 - 1)) - 1;
}
940
941
942 // We have to use a temporary register for things that can be relocated even
943 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
944 // space. There is no guarantee that the relocated location can be similarly
945 // encoded.
// Any value carrying relocation info may be patched to an arbitrary value
// later, so it must be loaded through a register rather than encoded as an
// instruction immediate.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}
949
GenInstrRegister(Opcode opcode,Register rs,Register rt,Register rd,uint16_t sa,SecondaryField func)950 void Assembler::GenInstrRegister(Opcode opcode,
951 Register rs,
952 Register rt,
953 Register rd,
954 uint16_t sa,
955 SecondaryField func) {
956 DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
957 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
958 | (rd.code() << kRdShift) | (sa << kSaShift) | func;
959 emit(instr);
960 }
961
962
GenInstrRegister(Opcode opcode,Register rs,Register rt,uint16_t msb,uint16_t lsb,SecondaryField func)963 void Assembler::GenInstrRegister(Opcode opcode,
964 Register rs,
965 Register rt,
966 uint16_t msb,
967 uint16_t lsb,
968 SecondaryField func) {
969 DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
970 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
971 | (msb << kRdShift) | (lsb << kSaShift) | func;
972 emit(instr);
973 }
974
975
GenInstrRegister(Opcode opcode,SecondaryField fmt,FPURegister ft,FPURegister fs,FPURegister fd,SecondaryField func)976 void Assembler::GenInstrRegister(Opcode opcode,
977 SecondaryField fmt,
978 FPURegister ft,
979 FPURegister fs,
980 FPURegister fd,
981 SecondaryField func) {
982 DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
983 Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
984 | (fd.code() << kFdShift) | func;
985 emit(instr);
986 }
987
988
GenInstrRegister(Opcode opcode,FPURegister fr,FPURegister ft,FPURegister fs,FPURegister fd,SecondaryField func)989 void Assembler::GenInstrRegister(Opcode opcode,
990 FPURegister fr,
991 FPURegister ft,
992 FPURegister fs,
993 FPURegister fd,
994 SecondaryField func) {
995 DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
996 Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
997 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
998 emit(instr);
999 }
1000
1001
GenInstrRegister(Opcode opcode,SecondaryField fmt,Register rt,FPURegister fs,FPURegister fd,SecondaryField func)1002 void Assembler::GenInstrRegister(Opcode opcode,
1003 SecondaryField fmt,
1004 Register rt,
1005 FPURegister fs,
1006 FPURegister fd,
1007 SecondaryField func) {
1008 DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
1009 Instr instr = opcode | fmt | (rt.code() << kRtShift)
1010 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
1011 emit(instr);
1012 }
1013
1014
GenInstrRegister(Opcode opcode,SecondaryField fmt,Register rt,FPUControlRegister fs,SecondaryField func)1015 void Assembler::GenInstrRegister(Opcode opcode,
1016 SecondaryField fmt,
1017 Register rt,
1018 FPUControlRegister fs,
1019 SecondaryField func) {
1020 DCHECK(fs.is_valid() && rt.is_valid());
1021 Instr instr =
1022 opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
1023 emit(instr);
1024 }
1025
1026
1027 // Instructions with immediate value.
1028 // Registers are in the order of the instruction encoding, from left to right.
GenInstrImmediate(Opcode opcode,Register rs,Register rt,int32_t j,CompactBranchType is_compact_branch)1029 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
1030 int32_t j,
1031 CompactBranchType is_compact_branch) {
1032 DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
1033 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1034 | (j & kImm16Mask);
1035 emit(instr, is_compact_branch);
1036 }
1037
1038
GenInstrImmediate(Opcode opcode,Register rs,SecondaryField SF,int32_t j,CompactBranchType is_compact_branch)1039 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
1040 int32_t j,
1041 CompactBranchType is_compact_branch) {
1042 DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
1043 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
1044 emit(instr, is_compact_branch);
1045 }
1046
1047
GenInstrImmediate(Opcode opcode,Register rs,FPURegister ft,int32_t j,CompactBranchType is_compact_branch)1048 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
1049 int32_t j,
1050 CompactBranchType is_compact_branch) {
1051 DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
1052 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
1053 | (j & kImm16Mask);
1054 emit(instr, is_compact_branch);
1055 }
1056
1057
GenInstrImmediate(Opcode opcode,Register rs,int32_t offset21,CompactBranchType is_compact_branch)1058 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
1059 CompactBranchType is_compact_branch) {
1060 DCHECK(rs.is_valid() && (is_int21(offset21)));
1061 Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
1062 emit(instr, is_compact_branch);
1063 }
1064
1065
GenInstrImmediate(Opcode opcode,Register rs,uint32_t offset21)1066 void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
1067 uint32_t offset21) {
1068 DCHECK(rs.is_valid() && (is_uint21(offset21)));
1069 Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
1070 emit(instr);
1071 }
1072
1073
GenInstrImmediate(Opcode opcode,int32_t offset26,CompactBranchType is_compact_branch)1074 void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
1075 CompactBranchType is_compact_branch) {
1076 DCHECK(is_int26(offset26));
1077 Instr instr = opcode | (offset26 & kImm26Mask);
1078 emit(instr, is_compact_branch);
1079 }
1080
1081
GenInstrJump(Opcode opcode,uint32_t address)1082 void Assembler::GenInstrJump(Opcode opcode,
1083 uint32_t address) {
1084 BlockTrampolinePoolScope block_trampoline_pool(this);
1085 DCHECK(is_uint26(address));
1086 Instr instr = opcode | address;
1087 emit(instr);
1088 BlockTrampolinePoolFor(1); // For associated delay slot.
1089 }
1090
1091
1092 // Returns the next free trampoline entry.
get_trampoline_entry(int32_t pos)1093 int32_t Assembler::get_trampoline_entry(int32_t pos) {
1094 int32_t trampoline_entry = kInvalidSlotPos;
1095
1096 if (!internal_trampoline_exception_) {
1097 if (trampoline_.start() > pos) {
1098 trampoline_entry = trampoline_.take_slot();
1099 }
1100
1101 if (kInvalidSlotPos == trampoline_entry) {
1102 internal_trampoline_exception_ = true;
1103 }
1104 }
1105 return trampoline_entry;
1106 }
1107
1108
jump_address(Label * L)1109 uint32_t Assembler::jump_address(Label* L) {
1110 int32_t target_pos;
1111
1112 if (L->is_bound()) {
1113 target_pos = L->pos();
1114 } else {
1115 if (L->is_linked()) {
1116 target_pos = L->pos(); // L's link.
1117 L->link_to(pc_offset());
1118 } else {
1119 L->link_to(pc_offset());
1120 return kEndOfJumpChain;
1121 }
1122 }
1123
1124 uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
1125 DCHECK((imm & 3) == 0);
1126
1127 return imm;
1128 }
1129
1130
branch_offset_helper(Label * L,OffsetSize bits)1131 int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
1132 int32_t target_pos;
1133 int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1134
1135 if (L->is_bound()) {
1136 target_pos = L->pos();
1137 } else {
1138 if (L->is_linked()) {
1139 target_pos = L->pos();
1140 L->link_to(pc_offset() + pad);
1141 } else {
1142 L->link_to(pc_offset() + pad);
1143 if (!trampoline_emitted_) {
1144 unbound_labels_count_++;
1145 next_buffer_check_ -= kTrampolineSlotsSize;
1146 }
1147 return kEndOfChain;
1148 }
1149 }
1150
1151 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
1152 DCHECK(is_intn(offset, bits + 2));
1153 DCHECK((offset & 3) == 0);
1154
1155 return offset;
1156 }
1157
1158
label_at_put(Label * L,int at_offset)1159 void Assembler::label_at_put(Label* L, int at_offset) {
1160 int target_pos;
1161 if (L->is_bound()) {
1162 target_pos = L->pos();
1163 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1164 } else {
1165 if (L->is_linked()) {
1166 target_pos = L->pos(); // L's link.
1167 int32_t imm18 = target_pos - at_offset;
1168 DCHECK((imm18 & 3) == 0);
1169 int32_t imm16 = imm18 >> 2;
1170 DCHECK(is_int16(imm16));
1171 instr_at_put(at_offset, (imm16 & kImm16Mask));
1172 } else {
1173 target_pos = kEndOfChain;
1174 instr_at_put(at_offset, 0);
1175 if (!trampoline_emitted_) {
1176 unbound_labels_count_++;
1177 next_buffer_check_ -= kTrampolineSlotsSize;
1178 }
1179 }
1180 L->link_to(at_offset);
1181 }
1182 }
1183
1184
1185 //------- Branch and jump instructions --------
1186
b(int16_t offset)1187 void Assembler::b(int16_t offset) {
1188 beq(zero_reg, zero_reg, offset);
1189 }
1190
1191
bal(int16_t offset)1192 void Assembler::bal(int16_t offset) {
1193 positions_recorder()->WriteRecordedPositions();
1194 bgezal(zero_reg, offset);
1195 }
1196
1197
bc(int32_t offset)1198 void Assembler::bc(int32_t offset) {
1199 DCHECK(IsMipsArchVariant(kMips32r6));
1200 GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
1201 }
1202
1203
balc(int32_t offset)1204 void Assembler::balc(int32_t offset) {
1205 DCHECK(IsMipsArchVariant(kMips32r6));
1206 positions_recorder()->WriteRecordedPositions();
1207 GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
1208 }
1209
1210
beq(Register rs,Register rt,int16_t offset)1211 void Assembler::beq(Register rs, Register rt, int16_t offset) {
1212 BlockTrampolinePoolScope block_trampoline_pool(this);
1213 GenInstrImmediate(BEQ, rs, rt, offset);
1214 BlockTrampolinePoolFor(1); // For associated delay slot.
1215 }
1216
1217
bgez(Register rs,int16_t offset)1218 void Assembler::bgez(Register rs, int16_t offset) {
1219 BlockTrampolinePoolScope block_trampoline_pool(this);
1220 GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1221 BlockTrampolinePoolFor(1); // For associated delay slot.
1222 }
1223
1224
bgezc(Register rt,int16_t offset)1225 void Assembler::bgezc(Register rt, int16_t offset) {
1226 DCHECK(IsMipsArchVariant(kMips32r6));
1227 DCHECK(!(rt.is(zero_reg)));
1228 GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1229 }
1230
1231
bgeuc(Register rs,Register rt,int16_t offset)1232 void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
1233 DCHECK(IsMipsArchVariant(kMips32r6));
1234 DCHECK(!(rs.is(zero_reg)));
1235 DCHECK(!(rt.is(zero_reg)));
1236 DCHECK(rs.code() != rt.code());
1237 GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1238 }
1239
1240
bgec(Register rs,Register rt,int16_t offset)1241 void Assembler::bgec(Register rs, Register rt, int16_t offset) {
1242 DCHECK(IsMipsArchVariant(kMips32r6));
1243 DCHECK(!(rs.is(zero_reg)));
1244 DCHECK(!(rt.is(zero_reg)));
1245 DCHECK(rs.code() != rt.code());
1246 GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1247 }
1248
1249
bgezal(Register rs,int16_t offset)1250 void Assembler::bgezal(Register rs, int16_t offset) {
1251 DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
1252 BlockTrampolinePoolScope block_trampoline_pool(this);
1253 positions_recorder()->WriteRecordedPositions();
1254 GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1255 BlockTrampolinePoolFor(1); // For associated delay slot.
1256 }
1257
1258
bgtz(Register rs,int16_t offset)1259 void Assembler::bgtz(Register rs, int16_t offset) {
1260 BlockTrampolinePoolScope block_trampoline_pool(this);
1261 GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1262 BlockTrampolinePoolFor(1); // For associated delay slot.
1263 }
1264
1265
bgtzc(Register rt,int16_t offset)1266 void Assembler::bgtzc(Register rt, int16_t offset) {
1267 DCHECK(IsMipsArchVariant(kMips32r6));
1268 DCHECK(!(rt.is(zero_reg)));
1269 GenInstrImmediate(BGTZL, zero_reg, rt, offset,
1270 CompactBranchType::COMPACT_BRANCH);
1271 }
1272
1273
blez(Register rs,int16_t offset)1274 void Assembler::blez(Register rs, int16_t offset) {
1275 BlockTrampolinePoolScope block_trampoline_pool(this);
1276 GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1277 BlockTrampolinePoolFor(1); // For associated delay slot.
1278 }
1279
1280
blezc(Register rt,int16_t offset)1281 void Assembler::blezc(Register rt, int16_t offset) {
1282 DCHECK(IsMipsArchVariant(kMips32r6));
1283 DCHECK(!(rt.is(zero_reg)));
1284 GenInstrImmediate(BLEZL, zero_reg, rt, offset,
1285 CompactBranchType::COMPACT_BRANCH);
1286 }
1287
1288
bltzc(Register rt,int16_t offset)1289 void Assembler::bltzc(Register rt, int16_t offset) {
1290 DCHECK(IsMipsArchVariant(kMips32r6));
1291 DCHECK(!rt.is(zero_reg));
1292 GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1293 }
1294
1295
bltuc(Register rs,Register rt,int16_t offset)1296 void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
1297 DCHECK(IsMipsArchVariant(kMips32r6));
1298 DCHECK(!(rs.is(zero_reg)));
1299 DCHECK(!(rt.is(zero_reg)));
1300 DCHECK(rs.code() != rt.code());
1301 GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1302 }
1303
1304
bltc(Register rs,Register rt,int16_t offset)1305 void Assembler::bltc(Register rs, Register rt, int16_t offset) {
1306 DCHECK(IsMipsArchVariant(kMips32r6));
1307 DCHECK(!rs.is(zero_reg));
1308 DCHECK(!rt.is(zero_reg));
1309 DCHECK(rs.code() != rt.code());
1310 GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1311 }
1312
1313
bltz(Register rs,int16_t offset)1314 void Assembler::bltz(Register rs, int16_t offset) {
1315 BlockTrampolinePoolScope block_trampoline_pool(this);
1316 GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1317 BlockTrampolinePoolFor(1); // For associated delay slot.
1318 }
1319
1320
bltzal(Register rs,int16_t offset)1321 void Assembler::bltzal(Register rs, int16_t offset) {
1322 DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
1323 BlockTrampolinePoolScope block_trampoline_pool(this);
1324 positions_recorder()->WriteRecordedPositions();
1325 GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1326 BlockTrampolinePoolFor(1); // For associated delay slot.
1327 }
1328
1329
bne(Register rs,Register rt,int16_t offset)1330 void Assembler::bne(Register rs, Register rt, int16_t offset) {
1331 BlockTrampolinePoolScope block_trampoline_pool(this);
1332 GenInstrImmediate(BNE, rs, rt, offset);
1333 BlockTrampolinePoolFor(1); // For associated delay slot.
1334 }
1335
1336
bovc(Register rs,Register rt,int16_t offset)1337 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
1338 DCHECK(IsMipsArchVariant(kMips32r6));
1339 DCHECK(!rs.is(zero_reg));
1340 if (rs.code() >= rt.code()) {
1341 GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1342 } else {
1343 GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1344 }
1345 }
1346
1347
bnvc(Register rs,Register rt,int16_t offset)1348 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
1349 DCHECK(IsMipsArchVariant(kMips32r6));
1350 DCHECK(!rs.is(zero_reg));
1351 if (rs.code() >= rt.code()) {
1352 GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1353 } else {
1354 GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1355 }
1356 }
1357
1358
blezalc(Register rt,int16_t offset)1359 void Assembler::blezalc(Register rt, int16_t offset) {
1360 DCHECK(IsMipsArchVariant(kMips32r6));
1361 DCHECK(!(rt.is(zero_reg)));
1362 positions_recorder()->WriteRecordedPositions();
1363 GenInstrImmediate(BLEZ, zero_reg, rt, offset,
1364 CompactBranchType::COMPACT_BRANCH);
1365 }
1366
1367
bgezalc(Register rt,int16_t offset)1368 void Assembler::bgezalc(Register rt, int16_t offset) {
1369 DCHECK(IsMipsArchVariant(kMips32r6));
1370 DCHECK(!(rt.is(zero_reg)));
1371 positions_recorder()->WriteRecordedPositions();
1372 GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1373 }
1374
1375
bgezall(Register rs,int16_t offset)1376 void Assembler::bgezall(Register rs, int16_t offset) {
1377 DCHECK(!IsMipsArchVariant(kMips32r6));
1378 DCHECK(!(rs.is(zero_reg)));
1379 BlockTrampolinePoolScope block_trampoline_pool(this);
1380 positions_recorder()->WriteRecordedPositions();
1381 GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
1382 BlockTrampolinePoolFor(1); // For associated delay slot.
1383 }
1384
1385
bltzalc(Register rt,int16_t offset)1386 void Assembler::bltzalc(Register rt, int16_t offset) {
1387 DCHECK(IsMipsArchVariant(kMips32r6));
1388 DCHECK(!(rt.is(zero_reg)));
1389 positions_recorder()->WriteRecordedPositions();
1390 GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1391 }
1392
1393
bgtzalc(Register rt,int16_t offset)1394 void Assembler::bgtzalc(Register rt, int16_t offset) {
1395 DCHECK(IsMipsArchVariant(kMips32r6));
1396 DCHECK(!(rt.is(zero_reg)));
1397 positions_recorder()->WriteRecordedPositions();
1398 GenInstrImmediate(BGTZ, zero_reg, rt, offset,
1399 CompactBranchType::COMPACT_BRANCH);
1400 }
1401
1402
beqzalc(Register rt,int16_t offset)1403 void Assembler::beqzalc(Register rt, int16_t offset) {
1404 DCHECK(IsMipsArchVariant(kMips32r6));
1405 DCHECK(!(rt.is(zero_reg)));
1406 positions_recorder()->WriteRecordedPositions();
1407 GenInstrImmediate(ADDI, zero_reg, rt, offset,
1408 CompactBranchType::COMPACT_BRANCH);
1409 }
1410
1411
bnezalc(Register rt,int16_t offset)1412 void Assembler::bnezalc(Register rt, int16_t offset) {
1413 DCHECK(IsMipsArchVariant(kMips32r6));
1414 DCHECK(!(rt.is(zero_reg)));
1415 positions_recorder()->WriteRecordedPositions();
1416 GenInstrImmediate(DADDI, zero_reg, rt, offset,
1417 CompactBranchType::COMPACT_BRANCH);
1418 }
1419
1420
beqc(Register rs,Register rt,int16_t offset)1421 void Assembler::beqc(Register rs, Register rt, int16_t offset) {
1422 DCHECK(IsMipsArchVariant(kMips32r6));
1423 DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1424 if (rs.code() < rt.code()) {
1425 GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1426 } else {
1427 GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1428 }
1429 }
1430
1431
beqzc(Register rs,int32_t offset)1432 void Assembler::beqzc(Register rs, int32_t offset) {
1433 DCHECK(IsMipsArchVariant(kMips32r6));
1434 DCHECK(!(rs.is(zero_reg)));
1435 GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
1436 }
1437
1438
bnec(Register rs,Register rt,int16_t offset)1439 void Assembler::bnec(Register rs, Register rt, int16_t offset) {
1440 DCHECK(IsMipsArchVariant(kMips32r6));
1441 DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1442 if (rs.code() < rt.code()) {
1443 GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1444 } else {
1445 GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1446 }
1447 }
1448
1449
bnezc(Register rs,int32_t offset)1450 void Assembler::bnezc(Register rs, int32_t offset) {
1451 DCHECK(IsMipsArchVariant(kMips32r6));
1452 DCHECK(!(rs.is(zero_reg)));
1453 GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
1454 }
1455
1456
j(int32_t target)1457 void Assembler::j(int32_t target) {
1458 #if DEBUG
1459 // Get pc of delay slot.
1460 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1461 bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1462 (kImm26Bits + kImmFieldShift)) == 0;
1463 DCHECK(in_range && ((target & 3) == 0));
1464 #endif
1465 BlockTrampolinePoolScope block_trampoline_pool(this);
1466 GenInstrJump(J, (target >> 2) & kImm26Mask);
1467 BlockTrampolinePoolFor(1); // For associated delay slot.
1468 }
1469
1470
jr(Register rs)1471 void Assembler::jr(Register rs) {
1472 if (!IsMipsArchVariant(kMips32r6)) {
1473 BlockTrampolinePoolScope block_trampoline_pool(this);
1474 if (rs.is(ra)) {
1475 positions_recorder()->WriteRecordedPositions();
1476 }
1477 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1478 BlockTrampolinePoolFor(1); // For associated delay slot.
1479 } else {
1480 jalr(rs, zero_reg);
1481 }
1482 }
1483
1484
jal(int32_t target)1485 void Assembler::jal(int32_t target) {
1486 #ifdef DEBUG
1487 // Get pc of delay slot.
1488 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1489 bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1490 (kImm26Bits + kImmFieldShift)) == 0;
1491 DCHECK(in_range && ((target & 3) == 0));
1492 #endif
1493 BlockTrampolinePoolScope block_trampoline_pool(this);
1494 positions_recorder()->WriteRecordedPositions();
1495 GenInstrJump(JAL, (target >> 2) & kImm26Mask);
1496 BlockTrampolinePoolFor(1); // For associated delay slot.
1497 }
1498
1499
jalr(Register rs,Register rd)1500 void Assembler::jalr(Register rs, Register rd) {
1501 DCHECK(rs.code() != rd.code());
1502 BlockTrampolinePoolScope block_trampoline_pool(this);
1503 positions_recorder()->WriteRecordedPositions();
1504 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1505 BlockTrampolinePoolFor(1); // For associated delay slot.
1506 }
1507
1508
jic(Register rt,int16_t offset)1509 void Assembler::jic(Register rt, int16_t offset) {
1510 DCHECK(IsMipsArchVariant(kMips32r6));
1511 GenInstrImmediate(POP66, zero_reg, rt, offset);
1512 }
1513
1514
jialc(Register rt,int16_t offset)1515 void Assembler::jialc(Register rt, int16_t offset) {
1516 DCHECK(IsMipsArchVariant(kMips32r6));
1517 positions_recorder()->WriteRecordedPositions();
1518 GenInstrImmediate(POP76, zero_reg, rt, offset);
1519 }
1520
1521
1522 // -------Data-processing-instructions---------
1523
1524 // Arithmetic.
1525
addu(Register rd,Register rs,Register rt)1526 void Assembler::addu(Register rd, Register rs, Register rt) {
1527 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1528 }
1529
1530
addiu(Register rd,Register rs,int32_t j)1531 void Assembler::addiu(Register rd, Register rs, int32_t j) {
1532 GenInstrImmediate(ADDIU, rs, rd, j);
1533 }
1534
1535
subu(Register rd,Register rs,Register rt)1536 void Assembler::subu(Register rd, Register rs, Register rt) {
1537 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1538 }
1539
1540
mul(Register rd,Register rs,Register rt)1541 void Assembler::mul(Register rd, Register rs, Register rt) {
1542 if (!IsMipsArchVariant(kMips32r6)) {
1543 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1544 } else {
1545 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1546 }
1547 }
1548
1549
mulu(Register rd,Register rs,Register rt)1550 void Assembler::mulu(Register rd, Register rs, Register rt) {
1551 DCHECK(IsMipsArchVariant(kMips32r6));
1552 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
1553 }
1554
1555
muh(Register rd,Register rs,Register rt)1556 void Assembler::muh(Register rd, Register rs, Register rt) {
1557 DCHECK(IsMipsArchVariant(kMips32r6));
1558 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
1559 }
1560
1561
muhu(Register rd,Register rs,Register rt)1562 void Assembler::muhu(Register rd, Register rs, Register rt) {
1563 DCHECK(IsMipsArchVariant(kMips32r6));
1564 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
1565 }
1566
1567
mod(Register rd,Register rs,Register rt)1568 void Assembler::mod(Register rd, Register rs, Register rt) {
1569 DCHECK(IsMipsArchVariant(kMips32r6));
1570 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
1571 }
1572
1573
modu(Register rd,Register rs,Register rt)1574 void Assembler::modu(Register rd, Register rs, Register rt) {
1575 DCHECK(IsMipsArchVariant(kMips32r6));
1576 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
1577 }
1578
1579
mult(Register rs,Register rt)1580 void Assembler::mult(Register rs, Register rt) {
1581 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1582 }
1583
1584
multu(Register rs,Register rt)1585 void Assembler::multu(Register rs, Register rt) {
1586 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1587 }
1588
1589
div(Register rs,Register rt)1590 void Assembler::div(Register rs, Register rt) {
1591 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1592 }
1593
1594
div(Register rd,Register rs,Register rt)1595 void Assembler::div(Register rd, Register rs, Register rt) {
1596 DCHECK(IsMipsArchVariant(kMips32r6));
1597 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
1598 }
1599
1600
divu(Register rs,Register rt)1601 void Assembler::divu(Register rs, Register rt) {
1602 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1603 }
1604
1605
divu(Register rd,Register rs,Register rt)1606 void Assembler::divu(Register rd, Register rs, Register rt) {
1607 DCHECK(IsMipsArchVariant(kMips32r6));
1608 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
1609 }
1610
1611
1612 // Logical.
1613
and_(Register rd,Register rs,Register rt)1614 void Assembler::and_(Register rd, Register rs, Register rt) {
1615 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1616 }
1617
1618
andi(Register rt,Register rs,int32_t j)1619 void Assembler::andi(Register rt, Register rs, int32_t j) {
1620 DCHECK(is_uint16(j));
1621 GenInstrImmediate(ANDI, rs, rt, j);
1622 }
1623
1624
or_(Register rd,Register rs,Register rt)1625 void Assembler::or_(Register rd, Register rs, Register rt) {
1626 GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
1627 }
1628
1629
ori(Register rt,Register rs,int32_t j)1630 void Assembler::ori(Register rt, Register rs, int32_t j) {
1631 DCHECK(is_uint16(j));
1632 GenInstrImmediate(ORI, rs, rt, j);
1633 }
1634
1635
xor_(Register rd,Register rs,Register rt)1636 void Assembler::xor_(Register rd, Register rs, Register rt) {
1637 GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
1638 }
1639
1640
xori(Register rt,Register rs,int32_t j)1641 void Assembler::xori(Register rt, Register rs, int32_t j) {
1642 DCHECK(is_uint16(j));
1643 GenInstrImmediate(XORI, rs, rt, j);
1644 }
1645
1646
nor(Register rd,Register rs,Register rt)1647 void Assembler::nor(Register rd, Register rs, Register rt) {
1648 GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1649 }
1650
1651
1652 // Shifts.
sll(Register rd,Register rt,uint16_t sa,bool coming_from_nop)1653 void Assembler::sll(Register rd,
1654 Register rt,
1655 uint16_t sa,
1656 bool coming_from_nop) {
1657 // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
1658 // generated using the sll instruction. They must be generated using
1659 // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
1660 // instructions.
1661 DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
1662 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
1663 }
1664
1665
sllv(Register rd,Register rt,Register rs)1666 void Assembler::sllv(Register rd, Register rt, Register rs) {
1667 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
1668 }
1669
1670
srl(Register rd,Register rt,uint16_t sa)1671 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
1672 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
1673 }
1674
1675
srlv(Register rd,Register rt,Register rs)1676 void Assembler::srlv(Register rd, Register rt, Register rs) {
1677 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
1678 }
1679
1680
sra(Register rd,Register rt,uint16_t sa)1681 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
1682 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
1683 }
1684
1685
srav(Register rd,Register rt,Register rs)1686 void Assembler::srav(Register rd, Register rt, Register rs) {
1687 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1688 }
1689
1690
rotr(Register rd,Register rt,uint16_t sa)1691 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1692 // Should be called via MacroAssembler::Ror.
1693 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1694 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1695 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1696 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1697 emit(instr);
1698 }
1699
1700
rotrv(Register rd,Register rt,Register rs)1701 void Assembler::rotrv(Register rd, Register rt, Register rs) {
1702 // Should be called via MacroAssembler::Ror.
1703 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
1704 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1705 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1706 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1707 emit(instr);
1708 }
1709
1710
lsa(Register rd,Register rt,Register rs,uint8_t sa)1711 void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
1712 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
1713 DCHECK(sa < 5 && sa > 0);
1714 DCHECK(IsMipsArchVariant(kMips32r6));
1715 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
1716 (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
1717 emit(instr);
1718 }
1719
1720
1721 // ------------Memory-instructions-------------
1722
1723 // Helper for base-reg + offset, when offset is larger than int16.
LoadRegPlusOffsetToAt(const MemOperand & src)1724 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
1725 DCHECK(!src.rm().is(at));
1726 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
1727 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
1728 addu(at, at, src.rm()); // Add base register.
1729 }
1730
1731
lb(Register rd,const MemOperand & rs)1732 void Assembler::lb(Register rd, const MemOperand& rs) {
1733 if (is_int16(rs.offset_)) {
1734 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1735 } else { // Offset > 16 bits, use multiple instructions to load.
1736 LoadRegPlusOffsetToAt(rs);
1737 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
1738 }
1739 }
1740
1741
lbu(Register rd,const MemOperand & rs)1742 void Assembler::lbu(Register rd, const MemOperand& rs) {
1743 if (is_int16(rs.offset_)) {
1744 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1745 } else { // Offset > 16 bits, use multiple instructions to load.
1746 LoadRegPlusOffsetToAt(rs);
1747 GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
1748 }
1749 }
1750
1751
lh(Register rd,const MemOperand & rs)1752 void Assembler::lh(Register rd, const MemOperand& rs) {
1753 if (is_int16(rs.offset_)) {
1754 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1755 } else { // Offset > 16 bits, use multiple instructions to load.
1756 LoadRegPlusOffsetToAt(rs);
1757 GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
1758 }
1759 }
1760
1761
lhu(Register rd,const MemOperand & rs)1762 void Assembler::lhu(Register rd, const MemOperand& rs) {
1763 if (is_int16(rs.offset_)) {
1764 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1765 } else { // Offset > 16 bits, use multiple instructions to load.
1766 LoadRegPlusOffsetToAt(rs);
1767 GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
1768 }
1769 }
1770
1771
lw(Register rd,const MemOperand & rs)1772 void Assembler::lw(Register rd, const MemOperand& rs) {
1773 if (is_int16(rs.offset_)) {
1774 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1775 } else { // Offset > 16 bits, use multiple instructions to load.
1776 LoadRegPlusOffsetToAt(rs);
1777 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
1778 }
1779 }
1780
1781
lwl(Register rd,const MemOperand & rs)1782 void Assembler::lwl(Register rd, const MemOperand& rs) {
1783 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1784 }
1785
1786
lwr(Register rd,const MemOperand & rs)1787 void Assembler::lwr(Register rd, const MemOperand& rs) {
1788 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1789 }
1790
1791
sb(Register rd,const MemOperand & rs)1792 void Assembler::sb(Register rd, const MemOperand& rs) {
1793 if (is_int16(rs.offset_)) {
1794 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1795 } else { // Offset > 16 bits, use multiple instructions to store.
1796 LoadRegPlusOffsetToAt(rs);
1797 GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
1798 }
1799 }
1800
1801
sh(Register rd,const MemOperand & rs)1802 void Assembler::sh(Register rd, const MemOperand& rs) {
1803 if (is_int16(rs.offset_)) {
1804 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1805 } else { // Offset > 16 bits, use multiple instructions to store.
1806 LoadRegPlusOffsetToAt(rs);
1807 GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
1808 }
1809 }
1810
1811
sw(Register rd,const MemOperand & rs)1812 void Assembler::sw(Register rd, const MemOperand& rs) {
1813 if (is_int16(rs.offset_)) {
1814 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1815 } else { // Offset > 16 bits, use multiple instructions to store.
1816 LoadRegPlusOffsetToAt(rs);
1817 GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
1818 }
1819 }
1820
1821
swl(Register rd,const MemOperand & rs)1822 void Assembler::swl(Register rd, const MemOperand& rs) {
1823 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1824 }
1825
1826
swr(Register rd,const MemOperand & rs)1827 void Assembler::swr(Register rd, const MemOperand& rs) {
1828 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1829 }
1830
1831
lui(Register rd,int32_t j)1832 void Assembler::lui(Register rd, int32_t j) {
1833 DCHECK(is_uint16(j));
1834 GenInstrImmediate(LUI, zero_reg, rd, j);
1835 }
1836
1837
aui(Register rt,Register rs,int32_t j)1838 void Assembler::aui(Register rt, Register rs, int32_t j) {
1839 // This instruction uses same opcode as 'lui'. The difference in encoding is
1840 // 'lui' has zero reg. for rs field.
1841 DCHECK(!(rs.is(zero_reg)));
1842 DCHECK(is_uint16(j));
1843 GenInstrImmediate(LUI, rs, rt, j);
1844 }
1845
1846
1847 // ---------PC-Relative instructions-----------
1848
addiupc(Register rs,int32_t imm19)1849 void Assembler::addiupc(Register rs, int32_t imm19) {
1850 DCHECK(IsMipsArchVariant(kMips32r6));
1851 DCHECK(rs.is_valid() && is_int19(imm19));
1852 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
1853 GenInstrImmediate(PCREL, rs, imm21);
1854 }
1855
1856
// PC-relative load word (r6 only): rs = mem[PC + (offset19 << 2)].
void Assembler::lwpc(Register rs, int32_t offset19) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
1863
1864
// Add upper immediate to PC (r6 only): rs = PC + (imm16 << 16).
void Assembler::auipc(Register rs, int16_t imm16) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid());
  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
1871
1872
// Aligned add upper immediate to PC (r6 only): like auipc but the low
// 16 bits of the result are cleared.
void Assembler::aluipc(Register rs, int16_t imm16) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid());
  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
1879
1880
1881 // -------------Misc-instructions--------------
1882
1883 // Break / Trap instructions.
// Emit a BREAK instruction carrying a 20-bit code in bits [25:6].
// |break_as_stop| asserts which numeric range |code| must fall in, keeping
// breakpoint codes and simulator stop codes disjoint.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}
1898
1899
// Emit a debugger stop. On real MIPS hardware this is a plain break; under
// the simulator it is a break in the stop-code range followed by the
// message pointer embedded in the instruction stream (hence the
// trampoline-pool block around the two words).
void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);
  DCHECK(code <= kMaxStopCode);
#if V8_HOST_ARCH_MIPS
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(2);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<Instr>(msg));
#endif
}
1913
1914
// Trap if rs >= rt (signed compare); |code| is a 10-bit field at bits [15:6].
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
1921
1922
// Trap if rs >= rt (unsigned compare); |code| is a 10-bit field.
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
1929
1930
// Trap if rs < rt (signed compare); |code| is a 10-bit field.
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
1937
1938
// Trap if rs < rt (unsigned compare); |code| is a 10-bit field.
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
1946
1947
// Trap if rs == rt; |code| is a 10-bit field.
void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
1954
1955
// Trap if rs != rt; |code| is a 10-bit field.
void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
1962
1963
1964 // Move from HI/LO register.
1965
// Move from the HI special register: rd = HI.
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}
1969
1970
// Move from the LO special register: rd = LO.
void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}
1974
1975
1976 // Set on less than instructions.
// Set on less than (signed): rd = (rs < rt) ? 1 : 0.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}
1980
1981
// Set on less than (unsigned): rd = (rs < rt) ? 1 : 0.
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}
1985
1986
// Set on less than immediate (signed): rt = (rs < j) ? 1 : 0.
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}
1990
1991
// Set on less than immediate (unsigned compare of rs with the
// sign-extended immediate): rt = (rs < j) ? 1 : 0.
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
1995
1996
1997 // Conditional move.
// Conditional move: rd = rs if rt == 0.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}
2001
2002
// Conditional move: rd = rs if rt != 0.
void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}
2006
2007
// Move on FP condition true: rd = rs if FP condition code |cc| is set.
// The cc number is packed into the rt field; low bit 1 selects "true".
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
2013
2014
// Move on FP condition false: rd = rs if FP condition code |cc| is clear.
// The cc number is packed into the rt field; low bit 0 selects "false".
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
2020
2021
// Select if equal to zero (r6 only): rd = (rt == 0) ? rs : 0.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
2026
2027
2028 // Bit twiddling.
// Count leading zeros: rd = number of leading zero bits in rs.
// Pre-r6 and r6 use different encodings.
void Assembler::clz(Register rd, Register rs) {
  if (!IsMipsArchVariant(kMips32r6)) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}
2037
2038
// Bit-field insert: copy the low |size| bits of rs into rt at bit |pos|.
// The msb (pos + size - 1) and lsb (pos) go in the rd and sa fields.
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
2045
2046
// Bit-field extract: rt = |size| bits of rs starting at bit |pos|,
// zero-extended. The msbd (size - 1) and lsb (pos) go in rd and sa fields.
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
2053
2054
// Reverse the bits within each byte of rt into rd (r6 only).
void Assembler::bitswap(Register rd, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}
2059
2060
// Prefetch the cache line at rs.rm() + rs.offset_ with the given 5-bit hint.
// Hand-encoded since the hint occupies the rt field.
void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(!IsMipsArchVariant(kLoongson));
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}
2068
2069
// Byte-align (r6 only): concatenate rs and rt and extract a word at byte
// position |bp| (0..7). The bp is packed into the sa field below ALIGN.
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(is_uint3(bp));
  uint16_t sa = (ALIGN << kBp2Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}
2076
2077
2078 // --------Coprocessor-instructions----------------
2079
2080 // Load, store, move.
// Load a single-precision FP word. Large offsets go through the 'at'
// scratch register (clobbering it).
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(LWC1, at, fd, 0);
  }
}
2089
2090
// Load a double-precision FP value as two 32-bit loads (mantissa and
// exponent words). In FP64 mode the high word travels via 'at' and mthc1;
// in FP32 mode it goes into the odd register of the fd/fd+1 pair.
// Clobbers 'at', so the base register must not be 'at'.
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // load to two 32-bit loads.
  DCHECK(!src.rm().is(at));
  if (IsFp64Mode()) {
    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
      GenInstrImmediate(LWC1, src.rm(), fd,
                        src.offset_ + Register::kMantissaOffset);
      GenInstrImmediate(LW, src.rm(), at,
                        src.offset_ + Register::kExponentOffset);
      mthc1(at, fd);
    } else {  // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(src);
      GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
      GenInstrImmediate(LW, at, at, Register::kExponentOffset);
      mthc1(at, fd);
    }
  } else {  // fp32 mode.
    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
      GenInstrImmediate(LWC1, src.rm(), fd,
                        src.offset_ + Register::kMantissaOffset);
      FPURegister nextfpreg;
      nextfpreg.setcode(fd.code() + 1);
      GenInstrImmediate(LWC1, src.rm(), nextfpreg,
                        src.offset_ + Register::kExponentOffset);
    } else {  // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(src);
      GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
      FPURegister nextfpreg;
      nextfpreg.setcode(fd.code() + 1);
      GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
    }
  }
}
2125
2126
// Store a single-precision FP word. Large offsets go through the 'at'
// scratch register (clobbering it).
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(SWC1, at, fd, 0);
  }
}
2135
2136
// Store a double-precision FP value as two 32-bit stores (mantissa and
// exponent words). In FP64 mode the high word is fetched with mfhc1 via
// 'at' or 't8'; in FP32 mode it comes from the odd register of the
// fd/fd+1 pair. Clobbers 'at'/'t8', so the base must be neither.
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // store to two 32-bit stores.
  DCHECK(!src.rm().is(at));
  DCHECK(!src.rm().is(t8));
  if (IsFp64Mode()) {
    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
      GenInstrImmediate(SWC1, src.rm(), fd,
                        src.offset_ + Register::kMantissaOffset);
      mfhc1(at, fd);
      GenInstrImmediate(SW, src.rm(), at,
                        src.offset_ + Register::kExponentOffset);
    } else {  // Offset > 16 bits, use multiple instructions to store.
      LoadRegPlusOffsetToAt(src);
      GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
      mfhc1(t8, fd);
      GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
    }
  } else {  // fp32 mode.
    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
      GenInstrImmediate(SWC1, src.rm(), fd,
                        src.offset_ + Register::kMantissaOffset);
      FPURegister nextfpreg;
      nextfpreg.setcode(fd.code() + 1);
      GenInstrImmediate(SWC1, src.rm(), nextfpreg,
                        src.offset_ + Register::kExponentOffset);
    } else {  // Offset > 16 bits, use multiple instructions to store.
      LoadRegPlusOffsetToAt(src);
      GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
      FPURegister nextfpreg;
      nextfpreg.setcode(fd.code() + 1);
      GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
    }
  }
}
2172
2173
// Move GPR to FPU: fs = rt (low 32 bits).
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}
2177
2178
// Move GPR to high half of FPU register: fs.hi = rt.
void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}
2182
2183
// Move FPU to GPR: rt = fs (low 32 bits).
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
2187
2188
// Move high half of FPU register to GPR: rt = fs.hi.
void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}
2192
2193
// Move GPR to FPU control register (e.g. FCSR): fs = rt.
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}
2197
2198
// Move FPU control register to GPR: rt = fs.
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}
2202
2203
DoubleAsTwoUInt32(double d,uint32_t * lo,uint32_t * hi)2204 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2205 uint64_t i;
2206 memcpy(&i, &d, 8);
2207
2208 *lo = i & 0xffffffff;
2209 *hi = i >> 32;
2210 }
2211
2212
// FP conditional move (single, pre-r6): fd = fs if rt != 0.
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}
2217
2218
// FP conditional move (double, pre-r6): fd = fs if rt != 0.
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
2223
2224
// FP select (r6 only): fd = (fd.bit0 != 0) ? ft : fs, in format |fmt|.
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));

  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}
2232
2233
// Single-precision wrapper for sel().
void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(S, fd, fs, ft);
}
2237
2238
// Double-precision wrapper for sel().
void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(D, fd, fs, ft);
}
2242
2243
// FP select-if-zero (r6 only), format |fmt|.
void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
}
2250
2251
// Select if not equal to zero (r6 only): rd = (rt != 0) ? rs : 0.
void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
2256
2257
// FP select-if-not-zero (r6 only), format |fmt|.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
2264
2265
// Double-precision wrapper for seleqz().
void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(D, fd, fs, ft);
}
2269
2270
// Single-precision wrapper for seleqz().
void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(S, fd, fs, ft);
}
2274
2275
// Double-precision wrapper for selnez().
void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(D, fd, fs, ft);
}
2279
2280
// Single-precision wrapper for selnez().
void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(S, fd, fs, ft);
}
2284
2285
// FP conditional move (single, pre-r6): fd = fs if rt == 0.
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}
2290
2291
// FP conditional move (double, pre-r6): fd = fs if rt == 0.
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}
2296
2297
// FP move on condition true (single, pre-r6): fd = fs if FP cc |cc| set.
// The cc number is packed into the ft field; low bit 1 selects "true".
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
2304
2305
// FP move on condition true (double, pre-r6): fd = fs if FP cc |cc| set.
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
2312
2313
// FP move on condition false (single, pre-r6): fd = fs if FP cc |cc| clear.
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
2320
2321
// FP move on condition false (double, pre-r6): fd = fs if FP cc |cc| clear.
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
2328
2329
2330 // Arithmetic.
2331
// Single-precision FP add: fd = fs + ft.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
}
2335
2336
// Double-precision FP add: fd = fs + ft.
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}
2340
2341
// Single-precision FP subtract: fd = fs - ft.
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
}
2345
2346
// Double-precision FP subtract: fd = fs - ft.
void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}
2350
2351
// Single-precision FP multiply: fd = fs * ft.
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
}
2355
2356
// Double-precision FP multiply: fd = fs * ft.
void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
2360
2361
// Double-precision fused multiply-add (r2 only): fd = fr + fs * ft.
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r2));
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}
2367
2368
// Single-precision FP divide: fd = fs / ft.
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
}
2372
2373
// Double-precision FP divide: fd = fs / ft.
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
2377
2378
// Single-precision FP absolute value: fd = |fs|.
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
}
2382
2383
// Double-precision FP absolute value: fd = |fs|.
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}
2387
2388
// Double-precision FP register move: fd = fs.
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}
2392
2393
// Single-precision FP register move: fd = fs.
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}
2397
2398
// Single-precision FP negate: fd = -fs.
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
}
2402
2403
// Double-precision FP negate: fd = -fs.
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
2407
2408
// Single-precision FP square root: fd = sqrt(fs).
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
}
2412
2413
// Double-precision FP square root: fd = sqrt(fs).
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
2417
2418
// Single-precision reciprocal square root (r2/r6 only).
void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}
2423
2424
// Double-precision reciprocal square root (r2/r6 only).
void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}
2429
2430
// Double-precision reciprocal (r2/r6 only).
void Assembler::recip_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}
2435
2436
// Single-precision reciprocal (r2/r6 only).
void Assembler::recip_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
2441
2442
2443 // Conversions.
2444
// Convert single to 32-bit word using the current rounding mode.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}
2448
2449
// Convert double to 32-bit word using the current rounding mode.
void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}
2453
2454
// Convert single to 32-bit word, rounding toward zero.
void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}
2458
2459
// Convert double to 32-bit word, rounding toward zero.
void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}
2463
2464
// Convert single to 32-bit word, rounding to nearest.
void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}
2468
2469
// Convert double to 32-bit word, rounding to nearest.
void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}
2473
2474
// Convert single to 32-bit word, rounding toward negative infinity.
void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}
2478
2479
// Convert double to 32-bit word, rounding toward negative infinity.
void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}
2483
2484
// Convert single to 32-bit word, rounding toward positive infinity.
void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}
2488
2489
// Convert double to 32-bit word, rounding toward positive infinity.
void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}
2493
2494
// Single-precision wrapper for rint().
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
2496
2497
// Round to integral value in FP format |fmt| using the current rounding
// mode (r6 only).
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
2503
2504
// Double-precision wrapper for rint().
void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
2506
2507
// Convert single to 64-bit integer (requires r2/r6 and FP64 mode).
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
2513
2514
// Convert double to 64-bit integer (requires r2/r6 and FP64 mode).
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
2520
2521
// Convert single to 64-bit integer, rounding toward zero
// (requires r2/r6 and FP64 mode).
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
2527
2528
// Convert double to 64-bit integer, rounding toward zero
// (requires r2/r6 and FP64 mode).
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
2534
2535
// Convert single to 64-bit integer, rounding to nearest
// (requires r2/r6 and FP64 mode).
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}
2541
2542
// Convert double to 64-bit integer, rounding to nearest
// (requires r2/r6 and FP64 mode).
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}
2548
2549
// Convert single to 64-bit integer, rounding toward negative infinity
// (requires r2/r6 and FP64 mode).
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}
2555
2556
// Convert double to 64-bit integer, rounding toward negative infinity
// (requires r2/r6 and FP64 mode).
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}
2562
2563
// Convert single to 64-bit integer, rounding toward positive infinity
// (requires r2/r6 and FP64 mode).
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}
2569
2570
// Convert double to 64-bit integer, rounding toward positive infinity
// (requires r2/r6 and FP64 mode).
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
2576
2577
// Classify single-precision value (r6 only): fd = class mask of fs.
void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}
2582
2583
// Classify double-precision value (r6 only): fd = class mask of fs.
void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
2588
2589
// FP minimum (r6 only), format |fmt|: fd = min(fs, ft).
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
2596
2597
// FP minimum of absolute values (r6 only), format |fmt|.
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}
2604
2605
// FP maximum (r6 only), format |fmt|: fd = max(fs, ft).
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}
2612
2613
// FP maximum of absolute values (r6 only), format |fmt|.
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
2620
2621
// Single-precision wrapper for min().
void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}
2625
2626
// Double-precision wrapper for min().
void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}
2630
2631
// Single-precision wrapper for max().
void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}
2635
2636
// Double-precision wrapper for max().
void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}
2640
2641
// Single-precision wrapper for mina().
void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}
2645
2646
// Double-precision wrapper for mina().
void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}
2650
2651
// Single-precision wrapper for maxa().
void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}
2655
2656
// Double-precision wrapper for maxa().
void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}
2660
2661
// Convert 32-bit word to single.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
2665
2666
// Convert 64-bit integer to single (requires r2/r6 and FP64 mode).
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
2672
2673
// Convert double to single.
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}
2677
2678
// Convert 32-bit word to double.
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}
2682
2683
// Convert 64-bit integer to double (requires r2/r6 and FP64 mode).
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
2689
2690
// Convert single to double.
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
2694
2695
2696 // Conditions for >= MIPSr6.
// r6 FP compare: write an all-ones/all-zeros mask into fd depending on
// |cond| applied to fs and ft. |fmt| must already be positioned in the
// rs field (bits [25:21]), which the DCHECK enforces.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
                    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}
2705
2706
// Single-precision r6 compare. NOTE: passing W here looks odd but appears
// deliberate — the r6 CMP.cond.S encoding reuses the fmt field value that
// V8 names W (verify against the MIPS r6 ISA manual before changing).
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}
2711
// Double-precision r6 compare. NOTE: passing L here appears deliberate —
// the r6 CMP.cond.D encoding reuses the fmt field value that V8 names L
// (verify against the MIPS r6 ISA manual before changing).
void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}
2716
2717
// Branch if bit 0 of FP register ft is zero (r6 only); |offset| is in
// instructions, encoded in the 16-bit immediate field.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
2723
2724
// Branch if bit 0 of FP register ft is non-zero (r6 only).
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
2730
2731
2732 // Conditions for < MIPSr6.
c(FPUCondition cond,SecondaryField fmt,FPURegister fs,FPURegister ft,uint16_t cc)2733 void Assembler::c(FPUCondition cond, SecondaryField fmt,
2734 FPURegister fs, FPURegister ft, uint16_t cc) {
2735 DCHECK(is_uint3(cc));
2736 DCHECK(fmt == S || fmt == D);
2737 DCHECK((fmt & ~(31 << kRsShift)) == 0);
2738 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
2739 | cc << 8 | 3 << 4 | cond;
2740 emit(instr);
2741 }
2742
2743
// c.cond.s: single-precision variant of the pre-r6 compare.
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}
2748
2749
// c.cond.d: double-precision variant of the pre-r6 compare.
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}
2754
2755
// Compares src1 against the constant 0.0 (the only supported value of src2)
// using the pre-r6 compare, leaving the result in FCSR condition bit 0.
// NOTE: clobbers f14, which is used to materialize the 0.0 operand.
void Assembler::fcmp(FPURegister src1, const double src2,
                     FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);   // f14 <- integer 0.
  cvt_d_w(f14, f14);     // f14 <- (double)0.
  c(cond, D, src1, f14, 0);
}
2763
2764
// bc1f: pre-r6 branch taken when FCSR condition bit cc is false (the 0 in
// bit 16 selects the branch-on-false form).
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}
2770
2771
// bc1t: pre-r6 branch taken when FCSR condition bit cc is true (the 1 in
// bit 16 selects the branch-on-true form).
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
2777
2778
// Adjusts an internal reference at pc by pc_delta after the code buffer has
// moved. Two encodings are handled:
//  - INTERNAL_REFERENCE: a raw 32-bit address stored in the instruction
//    stream (one word patched);
//  - INTERNAL_REFERENCE_ENCODED: an address split across a lui/ori pair
//    (two words patched).
// A zero word / kEndOfJumpChain marks an unresolved chain entry and is left
// untouched. Returns the number of instructions patched (0, 1 or 2).
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  Instr instr = instr_at(pc);

  if (RelocInfo::IsInternalReference(rmode)) {
    int32_t* p = reinterpret_cast<int32_t*>(pc);
    if (*p == 0) {
      // Unbound-label sentinel: nothing to relocate yet.
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
    if (IsLui(instr)) {
      Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
      Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
      DCHECK(IsOri(instr_ori));
      // Reassemble the 32-bit address from the two 16-bit immediates.
      int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
      imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
      if (imm == kEndOfJumpChain) {
        // End-of-chain sentinel: not a real address.
        return 0;  // Number of instructions patched.
      }
      imm += pc_delta;
      DCHECK((imm & 3) == 0);  // Targets must stay instruction-aligned.

      // Clear and rewrite both immediate fields with the shifted address.
      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;

      instr_at_put(pc + 0 * Assembler::kInstrSize,
                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
      instr_at_put(pc + 1 * Assembler::kInstrSize,
                   instr_ori | (imm & kImm16Mask));
      return 2;  // Number of instructions patched.
    } else {
      UNREACHABLE();
      return 0;
    }
  }
}
2818
2819
// Replaces the code buffer with a larger one, copying both the emitted
// instructions (from the front) and the relocation info (which grows down
// from the back), then fixes up every internal reference for the move.
// Growth policy: double until 1 MB, then grow linearly by 1 MB.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  // pc_delta moves the front-growing instructions; rc_delta moves the
  // back-growing reloc info so it stays flush with the buffer end.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries: internal references embed absolute positions
  // inside the old buffer and must be shifted by pc_delta.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
        rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}
2865
2866
// Emits a raw byte of data into the instruction stream (never into a
// branch-delay / forbidden slot).
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
2871
2872
// Emits a raw 32-bit word of data into the instruction stream (never into a
// branch-delay / forbidden slot).
void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
2877
2878
// Emits a raw 64-bit word of data into the instruction stream (never into a
// branch-delay / forbidden slot).
void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
2883
2884
// Emits the absolute address of a label as 32-bit data, recorded as an
// INTERNAL_REFERENCE so it is fixed up on buffer moves. For an unbound
// label the jump-chain value is emitted instead and the position is
// remembered for later patching when the label binds.
void Assembler::dd(Label* label) {
  uint32_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
2897
2898
// Records relocation information for the instruction at the current pc.
// External references are skipped unless serializing (or emitting debug
// code); CODE_TARGET_WITH_ID entries carry the recorded AST id as payload.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::COMMENT &&
      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      // Rewrite the entry with the pending AST id as its data payload.
      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
2927
2928
// Blocks trampoline-pool emission for the next `instructions` instructions.
// A pool check runs first so a due pool is flushed before the blocked region.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
2933
2934
// Emits the trampoline pool (one lui/ori/jr/nop slot per unbound label) if
// one is needed and emission is not currently blocked. The pool is emitted
// at most once; afterwards next_buffer_check_ is pushed to kMaxInt.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      // Jump over the pool so fall-through execution skips it.
      Label after_pool;
      if (IsMipsArchVariant(kMips32r6)) {
        bc(&after_pool);
      } else {
        b(&after_pool);
        nop();  // Branch-delay slot.
      }

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint32_t imm32;
        imm32 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
          lui(at, (imm32 & kHiMask) >> kLuiShift);
          ori(at, at, (imm32 & kImm16Mask));
        }
        jr(at);
        nop();  // Branch-delay slot.
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
2997
2998
target_address_at(Address pc)2999 Address Assembler::target_address_at(Address pc) {
3000 Instr instr1 = instr_at(pc);
3001 Instr instr2 = instr_at(pc + kInstrSize);
3002 // Interpret 2 instructions generated by li: lui/ori
3003 if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
3004 // Assemble the 32 bit value.
3005 return reinterpret_cast<Address>(
3006 (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
3007 }
3008
3009 // We should never get here, force a bad address if we do.
3010 UNREACHABLE();
3011 return (Address)0x0;
3012 }
3013
3014
// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
  // Overwrite the payload with this platform's canonical quiet NaN.
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
3022
3023
// On Mips, a target address is stored in a lui/ori instruction pair, each
// of which load 16 bits of the 32-bit address to a register.
// Patching the address must replace both instr, and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  Instr instr2 = instr_at(pc + kInstrSize);
  // Preserve the destination register of the existing pair.
  uint32_t rt_code = GetRtField(instr2);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction, using instr pair.
  Instr instr1 = instr_at(pc);
  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif

  // Must use 2 instructions to insure patchable code => just use lui and ori.
  // lui rt, upper-16.
  // ori rt rt, lower-16.
  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
  *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);


  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    // Invalidate both patched words so the CPU fetches the new code.
    Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
  }
}
3056
3057 } // namespace internal
3058 } // namespace v8
3059
3060 #endif // V8_TARGET_ARCH_MIPS
3061