1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license above has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 #include "src/mips64/assembler-mips64.h"
36 
37 #if V8_TARGET_ARCH_MIPS64
38 
39 #include "src/base/cpu.h"
40 #include "src/mips64/assembler-mips64-inl.h"
41 
42 namespace v8 {
43 namespace internal {
44 
45 
// Returns, as a CpuFeatures bit mask, the features that the build
// configuration itself guarantees.  For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned features = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  features |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // When the compiler targets hard float, FPU instructions may appear
  // anywhere, so our generated code (including snapshots) may use FPU
  // too.  This does not hold for cross compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  features |= 1u << FPU;
#endif

  return features;
}
65 
66 
// Determines the available CPU features and records them in
// |supported_|.  For cross compilation only statically known features
// are used; otherwise the host CPU is probed at runtime.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}
84 
85 
PrintTarget()86 void CpuFeatures::PrintTarget() { }
PrintFeatures()87 void CpuFeatures::PrintFeatures() { }
88 
89 
ToNumber(Register reg)90 int ToNumber(Register reg) {
91   DCHECK(reg.is_valid());
92   const int kNumbers[] = {
93     0,    // zero_reg
94     1,    // at
95     2,    // v0
96     3,    // v1
97     4,    // a0
98     5,    // a1
99     6,    // a2
100     7,    // a3
101     8,    // a4
102     9,    // a5
103     10,   // a6
104     11,   // a7
105     12,   // t0
106     13,   // t1
107     14,   // t2
108     15,   // t3
109     16,   // s0
110     17,   // s1
111     18,   // s2
112     19,   // s3
113     20,   // s4
114     21,   // s5
115     22,   // s6
116     23,   // s7
117     24,   // t8
118     25,   // t9
119     26,   // k0
120     27,   // k1
121     28,   // gp
122     29,   // sp
123     30,   // fp
124     31,   // ra
125   };
126   return kNumbers[reg.code()];
127 }
128 
129 
// Returns the Register whose architectural number is |num|
// (0 == zero_reg ... 31 == ra).
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3, a4, a5, a6, a7,
    t0, t1, t2, t3,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}
148 
149 
150 // -----------------------------------------------------------------------------
151 // Implementation of RelocInfo.
152 
// Relocation modes whose targets must be re-resolved when generated
// code moves: code targets plus (encoded) internal references.
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
156 
157 
IsCodedSpecially()158 bool RelocInfo::IsCodedSpecially() {
159   // The deserializer needs to know whether a pointer is specially coded.  Being
160   // specially coded on MIPS means that it is a lui/ori instruction, and that is
161   // always the case inside code objects.
162   return true;
163 }
164 
165 
IsInConstantPool()166 bool RelocInfo::IsInConstantPool() {
167   return false;
168 }
169 
// Returns the wasm memory address encoded at this relocation site.
Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}
174 
// Returns the wasm global address encoded at this relocation site.
Address RelocInfo::wasm_global_reference() {
  DCHECK(IsWasmGlobalReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}
179 
// Returns the wasm memory size encoded at this relocation site.  The
// size is stored via the target-address encoding, hence the cast back
// from a pointer value.
uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return static_cast<uint32_t>(
      reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
}
185 
// Rewrites the wasm memory address at this relocation site without
// validating the relocation mode.
void RelocInfo::unchecked_update_wasm_memory_reference(
    Address address, ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
190 
// Rewrites the wasm memory size at this relocation site without
// validating the relocation mode.  The size is smuggled through the
// target-address encoding, mirroring wasm_memory_size_reference().
void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
                                                  ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_,
                                   reinterpret_cast<Address>(size), flush_mode);
}
196 
197 // -----------------------------------------------------------------------------
198 // Implementation of Operand and MemOperand.
199 // See assembler-mips-inl.h for inlined constructors.
200 
// Builds an immediate operand from |handle|.  Heap objects are
// referenced through the handle location and tagged EMBEDDED_OBJECT so
// the GC can update them; non-heap values (Smis) are embedded directly
// with no relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    imm64_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm64_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}
215 
216 
// Memory operand addressing |offset| bytes off base register |rm|.
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}
220 
221 
// Memory operand addressing rm + unit * multiplier + offset_addend;
// convenient for indexing fixed-size slots.
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
227 
228 
229 // -----------------------------------------------------------------------------
230 // Specific instructions, constants, and masks.
231 
// Sign bit of a 16-bit immediate; used to recognize negative offsets.
static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                              (Register::kCode_sp << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                               (Register::kCode_sp << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT
//  ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw(r, MemOperand(fp, 0))
const Instr kLwRegFpOffsetPattern =
    LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// sw(r, MemOperand(fp, 0))
const Instr kSwRegFpOffsetPattern =
    SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw(r, MemOperand(fp, negative offset))
const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT

// sw(r, MemOperand(fp, negative offset))
const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
// Opcode + rs bits; identifies a lw/sw pattern independent of rt/offset.
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask  = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
265 
// Constructs an assembler emitting into |buffer| of |buffer_size|
// bytes, with all trampoline-pool bookkeeping reset.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()) {
  // Relocation info is written backwards from the end of the buffer,
  // while instructions grow forward from the start.
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  // With forced long branches no trampolines are ever needed.
  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}
287 
288 
// Finalizes code generation and fills |desc| with the buffer layout:
// instructions from the start of the buffer, relocation info from the
// end, growing toward each other.
void Assembler::GetCode(CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
  desc->origin = this;
  desc->constant_pool_size = 0;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
303 
304 
Align(int m)305 void Assembler::Align(int m) {
306   DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
307   EmitForbiddenSlotInstruction();
308   while ((pc_offset() & (m - 1)) != 0) {
309     nop();
310   }
311 }
312 
313 
CodeTargetAlign()314 void Assembler::CodeTargetAlign() {
315   // No advantage to aligning branch/call targets to more than
316   // single instruction, that I am aware of.
317   Align(4);
318 }
319 
320 
GetRtReg(Instr instr)321 Register Assembler::GetRtReg(Instr instr) {
322   Register rt;
323   rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
324   return rt;
325 }
326 
327 
GetRsReg(Instr instr)328 Register Assembler::GetRsReg(Instr instr) {
329   Register rs;
330   rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
331   return rs;
332 }
333 
334 
GetRdReg(Instr instr)335 Register Assembler::GetRdReg(Instr instr) {
336   Register rd;
337   rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
338   return rd;
339 }
340 
341 
GetRt(Instr instr)342 uint32_t Assembler::GetRt(Instr instr) {
343   return (instr & kRtFieldMask) >> kRtShift;
344 }
345 
346 
GetRtField(Instr instr)347 uint32_t Assembler::GetRtField(Instr instr) {
348   return instr & kRtFieldMask;
349 }
350 
351 
GetRs(Instr instr)352 uint32_t Assembler::GetRs(Instr instr) {
353   return (instr & kRsFieldMask) >> kRsShift;
354 }
355 
356 
GetRsField(Instr instr)357 uint32_t Assembler::GetRsField(Instr instr) {
358   return instr & kRsFieldMask;
359 }
360 
361 
GetRd(Instr instr)362 uint32_t Assembler::GetRd(Instr instr) {
363   return  (instr & kRdFieldMask) >> kRdShift;
364 }
365 
366 
GetRdField(Instr instr)367 uint32_t Assembler::GetRdField(Instr instr) {
368   return  instr & kRdFieldMask;
369 }
370 
371 
GetSa(Instr instr)372 uint32_t Assembler::GetSa(Instr instr) {
373   return (instr & kSaFieldMask) >> kSaShift;
374 }
375 
376 
GetSaField(Instr instr)377 uint32_t Assembler::GetSaField(Instr instr) {
378   return instr & kSaFieldMask;
379 }
380 
381 
GetOpcodeField(Instr instr)382 uint32_t Assembler::GetOpcodeField(Instr instr) {
383   return instr & kOpcodeMask;
384 }
385 
386 
GetFunction(Instr instr)387 uint32_t Assembler::GetFunction(Instr instr) {
388   return (instr & kFunctionFieldMask) >> kFunctionShift;
389 }
390 
391 
GetFunctionField(Instr instr)392 uint32_t Assembler::GetFunctionField(Instr instr) {
393   return instr & kFunctionFieldMask;
394 }
395 
396 
GetImmediate16(Instr instr)397 uint32_t Assembler::GetImmediate16(Instr instr) {
398   return instr & kImm16Mask;
399 }
400 
401 
GetLabelConst(Instr instr)402 uint32_t Assembler::GetLabelConst(Instr instr) {
403   return instr & ~kImm16Mask;
404 }
405 
406 
IsPop(Instr instr)407 bool Assembler::IsPop(Instr instr) {
408   return (instr & ~kRtMask) == kPopRegPattern;
409 }
410 
411 
IsPush(Instr instr)412 bool Assembler::IsPush(Instr instr) {
413   return (instr & ~kRtMask) == kPushRegPattern;
414 }
415 
416 
IsSwRegFpOffset(Instr instr)417 bool Assembler::IsSwRegFpOffset(Instr instr) {
418   return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
419 }
420 
421 
IsLwRegFpOffset(Instr instr)422 bool Assembler::IsLwRegFpOffset(Instr instr) {
423   return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
424 }
425 
426 
IsSwRegFpNegOffset(Instr instr)427 bool Assembler::IsSwRegFpNegOffset(Instr instr) {
428   return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
429           kSwRegFpNegOffsetPattern);
430 }
431 
432 
IsLwRegFpNegOffset(Instr instr)433 bool Assembler::IsLwRegFpNegOffset(Instr instr) {
434   return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
435           kLwRegFpNegOffsetPattern);
436 }
437 
438 
439 // Labels refer to positions in the (to be) generated code.
440 // There are bound, linked, and unused labels.
441 //
442 // Bound labels refer to known positions in the already
443 // generated code. pos() is the position the label refers to.
444 //
445 // Linked labels refer to unknown positions in the code
446 // to be generated; pos() is the position of the last
447 // instruction using the label.
448 
449 // The link chain is terminated by a value in the instruction of -1,
450 // which is an otherwise illegal value (branch -1 is inf loop).
451 // The instruction 16-bit offset field addresses 32-bit words, but in
452 // code is conv to an 18-bit value addressing bytes, hence the -4 value.
453 
// See the link-chain description above: -4 corresponds to the illegal
// branch offset -1, making it a safe end-of-chain sentinel.
const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
457 
458 
// True if |instr| is any branch instruction, including coprocessor
// branches and (on r6) the compact branch encodings.
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}
482 
483 
IsBc(Instr instr)484 bool Assembler::IsBc(Instr instr) {
485   uint32_t opcode = GetOpcodeField(instr);
486   // Checks if the instruction is a BC or BALC.
487   return opcode == BC || opcode == BALC;
488 }
489 
490 
IsBzc(Instr instr)491 bool Assembler::IsBzc(Instr instr) {
492   uint32_t opcode = GetOpcodeField(instr);
493   // Checks if the instruction is BEQZC or BNEZC.
494   return (opcode == POP66 && GetRsField(instr) != 0) ||
495          (opcode == POP76 && GetRsField(instr) != 0);
496 }
497 
498 
IsEmittedConstant(Instr instr)499 bool Assembler::IsEmittedConstant(Instr instr) {
500   uint32_t label_constant = GetLabelConst(instr);
501   return label_constant == 0;  // Emitted label const in reg-exp engine.
502 }
503 
504 
IsBeq(Instr instr)505 bool Assembler::IsBeq(Instr instr) {
506   return GetOpcodeField(instr) == BEQ;
507 }
508 
509 
IsBne(Instr instr)510 bool Assembler::IsBne(Instr instr) {
511   return GetOpcodeField(instr) == BNE;
512 }
513 
514 
IsBeqzc(Instr instr)515 bool Assembler::IsBeqzc(Instr instr) {
516   uint32_t opcode = GetOpcodeField(instr);
517   return opcode == POP66 && GetRsField(instr) != 0;
518 }
519 
520 
IsBnezc(Instr instr)521 bool Assembler::IsBnezc(Instr instr) {
522   uint32_t opcode = GetOpcodeField(instr);
523   return opcode == POP76 && GetRsField(instr) != 0;
524 }
525 
526 
// True if |instr| is BEQC.  Within the POP10 group the register
// ordering selects the variant; BEQC requires 0 < rs < rt (see the
// POP10 note in IsBranch).
bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}
533 
534 
// True if |instr| is BNEC.  Within the POP30 group the register
// ordering selects the variant; BNEC requires 0 < rs < rt (see the
// POP30 note in IsBranch).
bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}
541 
542 
// True if |instr| is an unconditional jump: j, jal, or the SPECIAL
// register forms jalr / jr (jr additionally requires rd == 0).
bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}
553 
554 
IsJ(Instr instr)555 bool Assembler::IsJ(Instr instr) {
556   uint32_t opcode = GetOpcodeField(instr);
557   // Checks if the instruction is a jump.
558   return opcode == J;
559 }
560 
561 
IsJal(Instr instr)562 bool Assembler::IsJal(Instr instr) {
563   return GetOpcodeField(instr) == JAL;
564 }
565 
566 
IsJr(Instr instr)567 bool Assembler::IsJr(Instr instr) {
568   return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
569 }
570 
571 
IsJalr(Instr instr)572 bool Assembler::IsJalr(Instr instr) {
573   return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
574 }
575 
576 
IsLui(Instr instr)577 bool Assembler::IsLui(Instr instr) {
578   uint32_t opcode = GetOpcodeField(instr);
579   // Checks if the instruction is a load upper immediate.
580   return opcode == LUI;
581 }
582 
583 
bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an OR-immediate (the original comment
  // said "load upper immediate", copied from IsLui by mistake).
  return opcode == ORI;
}
589 
590 
// True if |instr| is a nop carrying marker |type| (0..31).  Type 0 is
// the canonical nop; non-zero types are marker nops encoded as
// sll(zero_reg, at, type).
bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
613 
614 
// Returns the branch byte offset of |instr|: the 16-bit immediate,
// sign-extended and scaled by the 4-byte instruction size.
int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
619 
620 
IsLw(Instr instr)621 bool Assembler::IsLw(Instr instr) {
622   return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
623 }
624 
625 
// Returns the 16-bit offset of lw |instr| (truncated into the int16_t
// return type, which sign-extends it).
int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}
630 
631 
// Returns a copy of lw |instr| with its 16-bit offset replaced by
// |offset|; rs and rt are preserved.
Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}
641 
642 
IsSw(Instr instr)643 bool Assembler::IsSw(Instr instr) {
644   return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
645 }
646 
647 
SetSwOffset(Instr instr,int16_t offset)648 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
649   DCHECK(IsSw(instr));
650   return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
651 }
652 
653 
IsAddImmediate(Instr instr)654 bool Assembler::IsAddImmediate(Instr instr) {
655   return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
656 }
657 
658 
SetAddImmediateOffset(Instr instr,int16_t offset)659 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
660   DCHECK(IsAddImmediate(instr));
661   return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
662 }
663 
664 
IsAndImmediate(Instr instr)665 bool Assembler::IsAndImmediate(Instr instr) {
666   return GetOpcodeField(instr) == ANDI;
667 }
668 
669 
OffsetSizeInBits(Instr instr)670 static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
671   if (kArchVariant == kMips64r6) {
672     if (Assembler::IsBc(instr)) {
673       return Assembler::OffsetSize::kOffset26;
674     } else if (Assembler::IsBzc(instr)) {
675       return Assembler::OffsetSize::kOffset21;
676     }
677   }
678   return Assembler::OffsetSize::kOffset16;
679 }
680 
681 
// Decodes the offset field of branch |instr| and returns the absolute
// target position for a branch located at |pos|, or kEndOfChain when
// the offset is the link-chain terminator.
static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    // Branch offsets are relative to the instruction after the branch.
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
698 
699 
// Returns the position linked from the instruction at |pos| in a
// label's link chain, or kEndOfChain when |pos| terminates the chain.
// Handles all link encodings: 64-bit internal references, emitted
// label constants, branches, lui/ori address sequences, and j/jal.
int Assembler::target_at(int pos, bool is_internal) {
  if (is_internal) {
    // Internal references store an absolute 64-bit address in the code.
    int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
    int64_t address = *p;
    if (address == kEndOfJumpChain) {
      return kEndOfChain;
    } else {
      int64_t instr_address = reinterpret_cast<int64_t>(p);
      DCHECK(instr_address - address < INT_MAX);
      int delta = static_cast<int>(instr_address - address);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
       return kEndOfChain;
     } else {
       // Sign-extend the 16-bit immediate into an 18-bit byte offset.
       int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
       return (imm18 + pos);
     }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsLui(instr)) {
    // lui/ori/.../ori sequence holding an absolute address; note that
    // the instruction at pos + 2 * kInstrSize is skipped (it is not an
    // ori of this sequence -- see target_at_put).
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    // TODO(plind) create named constants for shift values.
    int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
    imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
    imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
    // Sign extend address;
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
      DCHECK(instr_address - imm < INT_MAX);
      int delta = static_cast<int>(instr_address - imm);
      DCHECK(pos > delta);
      return pos - delta;
    }
  } else {
    DCHECK(IsJ(instr) || IsJal(instr));
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      // Sign extend 28-bit offset.
      int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
      return pos + delta;
    }
  }
}
767 
768 
// Returns branch |instr| with its offset field rewritten so that a
// branch at |pos| reaches |target_pos|.  The target must be
// instruction-aligned and within the offset field's range.
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  // Offsets are relative to the instruction after the branch.
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK((imm & 3) == 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
782 
783 
// Patches the link-chain entry at |pos| so that it refers to
// |target_pos|.  Mirrors the encodings decoded by target_at().
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    // Internal references hold an absolute 64-bit address.
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    // lui/ori/.../ori sequence loading an absolute address; the
    // instruction at pos + 2 * kInstrSize is left untouched.
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pos + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}
844 
845 
// Debug helper: prints the state of label |L|.  For a linked label it
// walks (a copy of) the link chain and prints every entry.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    // Work on a copy so the caller's label is not consumed.
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, internal_reference_positions_.find(l.pos()) !=
                   internal_reference_positions_.end());
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
869 
870 
// Binds label |L| to position |pos|, patching every entry in its link
// chain.  Branches whose range cannot reach |pos| are routed through a
// trampoline slot instead.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    // One fewer unbound label: the next trampoline check can be pushed
    // out by one slot.
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = internal_reference_positions_.find(fixup_pos) !=
                  internal_reference_positions_.end();
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          // Out of range: branch to a trampoline slot that can reach
          // the target.
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK(trampoline_pos != kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
          dist = pos - fixup_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
918 
919 
// Binds L to the current assembly position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}
924 
925 
// Advances L to the next entry of its link chain, or marks it unused when
// the chain ends.
void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
936 
937 
// True when a plain branch from the current position can reach the bound
// label L; keeps a 4-instruction safety margin below the maximum reach.
bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}
942 
943 
// True when a branch with a |bits|-wide word offset can reach L. Unbound
// labels are optimistically treated as near. The offset field is in words
// (hence bits + 2) and signed (hence - 1); a 5-instruction margin is kept.
bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return ((pc_offset() - L->pos()) <
          (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
}
949 
950 
// Architecture-variant dispatch for branch reachability of a bound label.
bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}
955 
956 
BranchOffset(Instr instr)957 int Assembler::BranchOffset(Instr instr) {
958   // At pre-R6 and for other R6 branches the offset is 16 bits.
959   int bits = OffsetSize::kOffset16;
960 
961   if (kArchVariant == kMips64r6) {
962     uint32_t opcode = GetOpcodeField(instr);
963     switch (opcode) {
964       // Checks BC or BALC.
965       case BC:
966       case BALC:
967         bits = OffsetSize::kOffset26;
968         break;
969 
970       // Checks BEQZC or BNEZC.
971       case POP66:
972       case POP76:
973         if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
974         break;
975       default:
976         break;
977     }
978   }
979 
980   return (1 << (bits + 2 - 1)) - 1;
981 }
982 
983 
984 // We have to use a temporary register for things that can be relocated even
985 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
986 // space.  There is no guarantee that the relocated location can be similarly
987 // encoded.
// Any relocatable value must go through a register; only reloc-free
// immediates may be encoded directly (see comment above).
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}
991 
// Emits an R-type instruction: opcode | rs | rt | rd | sa | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
1003 
1004 
// Emits an R-type instruction with bit-field operands (e.g. ins/ext):
// msb goes in the rd field, lsb in the sa field.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}
1016 
1017 
// Emits an FPU three-register instruction: opcode | fmt | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}
1029 
1030 
// Emits an FPU four-register instruction: opcode | fr | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1042 
1043 
// Emits an FPU instruction with one GPR operand (e.g. moves/conversions):
// opcode | fmt | rt | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1055 
1056 
// Emits an FPU control-register transfer: opcode | fmt | rt | fs | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}
1067 
1068 
1069 // Instructions with immediate value.
1070 // Registers are in the order of the instruction encoding, from left to right.
// Emits an I-type instruction with a 16-bit immediate: opcode | rs | rt | j.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  // The immediate may be interpreted as signed or unsigned by the opcode.
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1079 
1080 
// Emits an I-type instruction whose rt field carries a secondary opcode
// (e.g. the REGIMM branches): opcode | rs | SF | j.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1088 
1089 
// Emits an I-type instruction with an FPU register in the rt slot
// (e.g. FPU loads/stores): opcode | rs | ft | j.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1098 
1099 
// Emits an instruction with a signed 21-bit offset (R6 compact branches).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}
1106 
1107 
// Emits an instruction with an unsigned 21-bit immediate.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}
1114 
1115 
// Emits an instruction with a signed 26-bit offset (R6 BC/BALC).
void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}
1122 
1123 
// Emits a J-type instruction (26-bit target). The trampoline pool is blocked
// so it cannot be emitted into the delay slot.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1132 
1133 
1134 // Returns the next free trampoline entry.
get_trampoline_entry(int32_t pos)1135 int32_t Assembler::get_trampoline_entry(int32_t pos) {
1136   int32_t trampoline_entry = kInvalidSlotPos;
1137   if (!internal_trampoline_exception_) {
1138     if (trampoline_.start() > pos) {
1139      trampoline_entry = trampoline_.take_slot();
1140     }
1141 
1142     if (kInvalidSlotPos == trampoline_entry) {
1143       internal_trampoline_exception_ = true;
1144     }
1145   }
1146   return trampoline_entry;
1147 }
1148 
1149 
// Returns the absolute address for a jump to L. For an unbound label the
// current position is linked into L's chain and a sentinel is returned so
// the jump can be patched when the label is bound.
uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  // Absolute address = buffer base + offset; must be 4-byte aligned.
  uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);

  return imm;
}
1168 
1169 
// Returns the pc-relative byte offset for a jump to L, or kEndOfJumpChain
// for a fresh unbound label (linking the current position into L's chain).
// A padding instruction is accounted for when the previous instruction was
// a compact branch (which has no delay slot and forces a nop).
uint64_t Assembler::jump_offset(Label* L) {
  int64_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      return kEndOfJumpChain;
    }
  }
  int64_t imm = target_pos - (pc_offset() + pad);
  DCHECK((imm & 3) == 0);  // Instruction-aligned.

  return static_cast<uint64_t>(imm);
}
1190 
1191 
// Returns the branch offset (relative to the branch's delay slot) to L, or
// kEndOfChain for a fresh unbound label. Linking a new unbound label bumps
// the trampoline bookkeeping so a pool is emitted before branches go out of
// range. |bits| is the offset field width used only for range checking.
int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  // Compact branches have no delay slot; a nop is inserted after them.
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));  // Must fit the offset field (in bytes).
  DCHECK((offset & 3) == 0);          // Instruction-aligned.

  return offset;
}
1218 
1219 
// Stores a reference to L at buffer position |at_offset|. For a bound label
// the code-object-relative target is written directly; otherwise the slot is
// linked into L's chain as a word-scaled imm16 (or 0 for a fresh label).
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);  // Instruction-aligned.
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      // Fresh label: write the end-of-chain marker (0) and start the chain.
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1244 
1245 
1246 //------- Branch and jump instructions --------
1247 
// Unconditional branch: beq with both operands zero_reg always takes.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}
1251 
1252 
// Unconditional branch-and-link: bgezal with zero_reg always takes.
void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}
1256 
1257 
// R6 compact unconditional branch (26-bit offset, no delay slot).
void Assembler::bc(int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
1262 
1263 
// R6 compact branch-and-link (26-bit offset, no delay slot).
void Assembler::balc(int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
1268 
1269 
// Branch if rs == rt; blocks the trampoline pool across the delay slot.
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1275 
1276 
// Branch if rs >= 0 (REGIMM-encoded).
void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1282 
1283 
// R6 compact branch if rt >= 0; encoded via BLEZL with rs == rt.
void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1289 
1290 
// R6 compact branch if rs >= rt (unsigned); encoded via BLEZ with two
// distinct non-zero registers.
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1298 
1299 
// R6 compact branch if rs >= rt (signed); encoded via BLEZL with two
// distinct non-zero registers.
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1307 
1308 
// Branch-and-link if rs >= 0. On R6 only the rs == zero_reg form (plain
// BAL) remains valid.
void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1315 
1316 
// Branch if rs > 0.
void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1322 
1323 
// R6 compact branch if rt > 0; encoded via BGTZL with rs == 0.
void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1330 
1331 
// Branch if rs <= 0.
void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1337 
1338 
// R6 compact branch if rt <= 0; encoded via BLEZL with rs == 0.
void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1345 
1346 
// R6 compact branch if rt < 0; encoded via BGTZL with rs == rt.
void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rt.is(zero_reg));
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1352 
1353 
// R6 compact branch if rs < rt (unsigned); encoded via BGTZ with two
// distinct non-zero registers.
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1361 
1362 
// R6 compact branch if rs < rt (signed); encoded via BGTZL with two
// distinct non-zero registers.
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rs.is(zero_reg));
  DCHECK(!rt.is(zero_reg));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1370 
1371 
// Branch if rs < 0 (REGIMM-encoded).
void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1377 
1378 
// Branch-and-link if rs < 0. On R6 only the rs == zero_reg form remains.
void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1385 
1386 
// Branch if rs != rt.
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1392 
1393 
bovc(Register rs,Register rt,int16_t offset)1394 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
1395   DCHECK(kArchVariant == kMips64r6);
1396   if (rs.code() >= rt.code()) {
1397     GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1398   } else {
1399     GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1400   }
1401 }
1402 
1403 
bnvc(Register rs,Register rt,int16_t offset)1404 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
1405   DCHECK(kArchVariant == kMips64r6);
1406   if (rs.code() >= rt.code()) {
1407     GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1408   } else {
1409     GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1410   }
1411 }
1412 
1413 
// R6 compact branch-and-link if rt <= 0; encoded via BLEZ with rs == 0.
void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1420 
1421 
// R6 compact branch-and-link if rt >= 0; encoded via BLEZ with rs == rt.
void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1427 
1428 
// Branch-and-link (likely) if rs >= 0; pre-R6 only.
void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1436 
1437 
// R6 compact branch-and-link if rt < 0; encoded via BGTZ with rs == rt.
void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1443 
1444 
// R6 compact branch-and-link if rt > 0; encoded via BGTZ with rs == 0.
void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1451 
1452 
// R6 compact branch-and-link if rt == 0; encoded via ADDI with rs == 0.
void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1459 
1460 
// R6 compact branch-and-link if rt != 0; encoded via DADDI with rs == 0.
void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1467 
1468 
beqc(Register rs,Register rt,int16_t offset)1469 void Assembler::beqc(Register rs, Register rt, int16_t offset) {
1470   DCHECK(kArchVariant == kMips64r6);
1471   DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1472   if (rs.code() < rt.code()) {
1473     GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1474   } else {
1475     GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1476   }
1477 }
1478 
1479 
// R6 compact branch if rs == 0 (21-bit offset, POP66 encoding).
void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1485 
1486 
bnec(Register rs,Register rt,int16_t offset)1487 void Assembler::bnec(Register rs, Register rt, int16_t offset) {
1488   DCHECK(kArchVariant == kMips64r6);
1489   DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1490   if (rs.code() < rt.code()) {
1491     GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1492   } else {
1493     GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1494   }
1495 }
1496 
1497 
// R6 compact branch if rs != 0 (21-bit offset, POP76 encoding).
void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1503 
1504 
// Absolute jump; the byte target is converted to a 26-bit word index.
void Assembler::j(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1510 
1511 
// Jump to a label. A bound label is emitted with the kJRawMark marker so the
// pc-relative offset can be resolved to a real J when code is committed; an
// unbound label falls back to j(imm), which links into the label's chain.
void Assembler::j(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrJump(static_cast<Opcode>(kJRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    j(imm);
  }
}
1523 
1524 
// Jump-and-link to a label; mirrors j(Label*) using the kJalRawMark marker
// for bound labels.
void Assembler::jal(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrJump(static_cast<Opcode>(kJalRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jal(imm);
  }
}
1536 
1537 
// Jump to the address in rs. R6 dropped JR; it is emitted as jalr with
// rd == zero_reg (link discarded) there.
void Assembler::jr(Register rs) {
  if (kArchVariant != kMips64r6) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}
1547 
1548 
// Absolute jump-and-link; the byte target becomes a 26-bit word index.
void Assembler::jal(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1554 
1555 
// Jump to rs, storing the return address in rd (rd must differ from rs).
void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1562 
1563 
// R6 jump indexed compact: target = rt + offset; shares the POP66 major
// opcode with beqzc, distinguished by rs == 0.
void Assembler::jic(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(POP66, zero_reg, rt, offset);
}
1568 
1569 
// R6 jump indexed and link compact: target = rt + offset; shares the POP76
// major opcode with bnezc, distinguished by rs == 0.
void Assembler::jialc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(POP76, zero_reg, rt, offset);
}
1574 
1575 
1576 // -------Data-processing-instructions---------
1577 
1578 // Arithmetic.
1579 
// ADDU: rd = rs + rt (32-bit, no overflow trap).
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
1583 
1584 
// ADDIU: rd = rs + imm16 (32-bit, no overflow trap).
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}
1588 
1589 
// SUBU: rd = rs - rt (32-bit, no overflow trap).
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
1593 
1594 
// MUL: rd = low 32 bits of rs * rt. R6 uses the SPECIAL encoding; pre-R6
// uses the SPECIAL2 MUL.
void Assembler::mul(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
      GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  } else {
      GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  }
}
1602 
1603 
// MUH (R6): rd = high 32 bits of rs * rt (signed).
void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}
1608 
1609 
// MULU (R6): rd = low 32 bits of rs * rt (unsigned).
void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}
1614 
1615 
// MUHU (R6): rd = high 32 bits of rs * rt (unsigned).
void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}
1620 
1621 
// DMUL (R6): rd = low 64 bits of rs * rt (signed).
void Assembler::dmul(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}
1626 
1627 
// DMUH (R6): rd = high 64 bits of rs * rt (signed).
void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}
1632 
1633 
// DMULU (R6): rd = low 64 bits of rs * rt (unsigned).
void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}
1638 
1639 
// DMUHU (R6): rd = high 64 bits of rs * rt (unsigned).
void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}
1644 
1645 
// MULT (pre-R6): HI/LO = rs * rt (signed).
void Assembler::mult(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
1650 
1651 
// MULTU (pre-R6): HI/LO = rs * rt (unsigned).
void Assembler::multu(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
1656 
1657 
// DADDIU: rd = rs + imm16 (64-bit, no overflow trap).
void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}
1661 
1662 
// DIV (two-operand): HI = rs % rt, LO = rs / rt (signed 32-bit).
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}
1666 
1667 
// DIV (R6 three-operand): rd = rs / rt (signed 32-bit).
void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}
1672 
1673 
// MOD (R6): rd = rs % rt (signed 32-bit).
void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
1678 
1679 
// DIVU (two-operand): HI = rs % rt, LO = rs / rt (unsigned 32-bit).
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
1683 
1684 
// DIVU (R6 three-operand): rd = rs / rt (unsigned 32-bit).
void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
1689 
1690 
// MODU (R6): rd = rs % rt (unsigned 32-bit).
void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
1695 
1696 
// DADDU: rd = rs + rt (64-bit, no overflow trap).
void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}
1700 
1701 
// DSUBU: rd = rs - rt (64-bit, no overflow trap).
void Assembler::dsubu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}
1705 
1706 
// DMULT: HI/LO = rs * rt (signed 64-bit).
void Assembler::dmult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}
1710 
1711 
// DMULTU: HI/LO = rs * rt (unsigned 64-bit).
void Assembler::dmultu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}
1715 
1716 
// DDIV (two-operand): HI = rs % rt, LO = rs / rt (signed 64-bit).
void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}
1720 
1721 
// DDIV (R6 three-operand): rd = rs / rt (signed 64-bit).
void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}
1726 
1727 
// r6-only three-operand doubleword signed modulo: rd = rs % rt.
void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}
1732 
1733 
// Legacy doubleword unsigned divide: quotient/remainder go to LO/HI.
void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}
1737 
1738 
// r6-only three-operand doubleword unsigned divide: rd = rs / rt.
void Assembler::ddivu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}
1743 
1744 
// r6-only three-operand doubleword unsigned modulo: rd = rs % rt.
void Assembler::dmodu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}
1749 
1750 
1751 // Logical.
1752 
// Bitwise AND: rd = rs & rt. Trailing underscore avoids C++ keyword clash.
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}
1756 
1757 
// Bitwise AND with a 16-bit zero-extended immediate: rt = rs & j.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}
1762 
1763 
// Bitwise OR: rd = rs | rt.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}
1767 
1768 
// Bitwise OR with a 16-bit zero-extended immediate: rt = rs | j.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}
1773 
1774 
// Bitwise XOR: rd = rs ^ rt.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}
1778 
1779 
// Bitwise XOR with a 16-bit zero-extended immediate: rt = rs ^ j.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}
1784 
1785 
// Bitwise NOR: rd = ~(rs | rt).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
1789 
1790 
1791 // Shifts.
// Shift left logical by immediate: rd = rt << (sa & 0x1F).
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
1803 
1804 
// Shift left logical by register: rd = rt << rs.
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}
1808 
1809 
// Shift right logical by immediate: rd = rt >> (sa & 0x1F), zero-fill.
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}
1813 
1814 
// Shift right logical by register: rd = rt >> rs, zero-fill.
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}
1818 
1819 
// Shift right arithmetic by immediate: rd = rt >> (sa & 0x1F), sign-fill.
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}
1823 
1824 
// Shift right arithmetic by register: rd = rt >> rs, sign-fill.
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
1828 
1829 
// Rotate right by immediate. Encoded as SRL with bit 21 (rs field = 1) set.
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}
1838 
1839 
// Rotate right by register. Encoded as SRLV with sa field = 1.
void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
     | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}
1848 
1849 
// Doubleword shift left logical by immediate (0-31).
void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
}
1853 
1854 
// Doubleword shift left logical by register.
void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}
1858 
1859 
// Doubleword shift right logical by immediate (0-31).
void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
}
1863 
1864 
// Doubleword shift right logical by register.
void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}
1868 
1869 
// Doubleword rotate right by immediate (0-31). DSRL with bit 21 set.
void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
  emit(instr);
}
1876 
// Doubleword rotate right by 32 + sa (rotate amounts 32-63). DSRL32 variant.
void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
  emit(instr);
}
1883 
// Doubleword rotate right by register. DSRLV with sa field = 1.
void Assembler::drotrv(Register rd, Register rt, Register rs) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
  emit(instr);
}
1890 
1891 
// Doubleword shift right arithmetic by immediate (0-31).
void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
}
1895 
1896 
// Doubleword shift right arithmetic by register.
void Assembler::dsrav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}
1900 
1901 
// Doubleword shift left logical by 32 + sa (shift amounts 32-63).
void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
}
1905 
1906 
// Doubleword shift right logical by 32 + sa (shift amounts 32-63).
void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
}
1910 
1911 
// Doubleword shift right arithmetic by 32 + sa (shift amounts 32-63).
void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
}
1915 
1916 
// r6-only scaled add: rd = rt + (rs << (sa + 1)); sa is the 2-bit scale.
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa <= 3);
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | LSA;
  emit(instr);
}
1925 
1926 
// r6-only doubleword scaled add: rd = rt + (rs << (sa + 1)).
void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa <= 3);
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | DLSA;
  emit(instr);
}
1935 
1936 
1937 // ------------Memory-instructions-------------
1938 
1939 // Helper for base-reg + offset, when offset is larger than int16.
// Helper for base-reg + offset, when offset is larger than int16.
// Materializes src.rm() + src.offset_ into the scratch register 'at'
// using a 4-instruction sequence; clobbers 'at', so the base must not be 'at'.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  DCHECK(is_int32(src.offset_));
  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);  // High half.
  dsll(at, at, kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  daddu(at, at, src.rm());  // Add base register.
}
1948 
1949 
// Load byte (sign-extended). Falls back to an 'at'-based sequence for
// offsets outside the signed 16-bit immediate range.
void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}
1958 
1959 
// Load byte unsigned (zero-extended); long offsets go via 'at'.
void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}
1968 
1969 
// Load halfword (sign-extended); long offsets go via 'at'.
void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}
1978 
1979 
// Load halfword unsigned (zero-extended); long offsets go via 'at'.
void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}
1988 
1989 
// Load word (sign-extended to 64 bits); long offsets go via 'at'.
void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}
1998 
1999 
// Load word unsigned (zero-extended); long offsets go via 'at'.
void Assembler::lwu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
  }
}
2008 
2009 
// Load word left (unaligned access helper); r2 only — removed in r6.
void Assembler::lwl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
2015 
2016 
// Load word right (unaligned access helper); r2 only — removed in r6.
void Assembler::lwr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
2022 
2023 
// Store byte; long offsets go via 'at'.
void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}
2032 
2033 
// Store halfword; long offsets go via 'at'.
void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}
2042 
2043 
// Store word; long offsets go via 'at'.
void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}
2052 
2053 
// Store word left (unaligned access helper); r2 only — removed in r6.
void Assembler::swl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
2059 
2060 
// Store word right (unaligned access helper); r2 only — removed in r6.
void Assembler::swr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
2066 
2067 
// Load upper immediate: rd = j << 16 (rs field is zero_reg; cf. aui).
void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}
2072 
2073 
// Add upper immediate: rt = rs + (j << 16).
void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}
2080 
2081 
// Doubleword add upper immediate; rs must not be zero_reg (that encoding
// is reserved).
void Assembler::daui(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  DCHECK(!rs.is(zero_reg));
  GenInstrImmediate(DAUI, rs, rt, j);
}
2087 
2088 
// Doubleword add immediate to bits 32..47 of rs (REGIMM encoding).
void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}
2093 
2094 
// Doubleword add immediate to bits 48..63 of rs (REGIMM encoding).
void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}
2099 
2100 
// Load doubleword left (unaligned access helper); r2 only.
void Assembler::ldl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}
2106 
2107 
// Load doubleword right (unaligned access helper); r2 only.
void Assembler::ldr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}
2113 
2114 
// Store doubleword left (unaligned access helper); r2 only.
void Assembler::sdl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}
2120 
2121 
// Store doubleword right (unaligned access helper); r2 only.
void Assembler::sdr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
2127 
2128 
// Load doubleword; long offsets go via 'at'.
void Assembler::ld(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LD, at, rd, 0);  // Equiv to ld(rd, MemOperand(at, 0));
  }
}
2137 
2138 
// Store doubleword; long offsets go via 'at'.
void Assembler::sd(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SD, at, rd, 0);  // Equiv to sd(rd, MemOperand(at, 0));
  }
}
2147 
2148 
2149 // ---------PC-Relative instructions-----------
2150 
// r6 PC-relative add: rs = PC + (imm19 << 2). Sub-opcode packed above the
// 19-bit immediate inside the 21-bit PCREL field.
void Assembler::addiupc(Register rs, int32_t imm19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(imm19));
  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2157 
2158 
// r6 PC-relative load word; offset19 is in words.
void Assembler::lwpc(Register rs, int32_t offset19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2165 
2166 
// r6 PC-relative load word unsigned; offset19 is in words.
void Assembler::lwupc(Register rs, int32_t offset19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2173 
2174 
// r6 PC-relative load doubleword; 18-bit offset (doubleword-scaled).
void Assembler::ldpc(Register rs, int32_t offset18) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int18(offset18));
  uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2181 
2182 
// r6 add upper immediate to PC: rs = PC + (imm16 << 16).
void Assembler::auipc(Register rs, int16_t imm16) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2189 
2190 
// r6 aligned add upper immediate to PC (low 16 bits of result cleared).
void Assembler::aluipc(Register rs, int16_t imm16) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2197 
2198 
2199 // -------------Misc-instructions--------------
2200 
2201 // Break / Trap instructions.
// Emit a BREAK with a 20-bit code. Codes in the stop range are only allowed
// when break_as_stop is true, so plain breaks cannot be confused with stops.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);  // Code must fit the 20-bit field.
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}
2216 
2217 
// Emit a simulator stop. On real MIPS hardware this degrades to a plain
// break; under the simulator it is a break in the stop-code range.
void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);
  DCHECK(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(3);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  // Do not embed the message string address! We used to do this, but that
  // made snapshots created from position-independent executable builds
  // non-deterministic.
  // TODO(yangguo): remove this field entirely.
  nop();
#endif
}
2235 
2236 
// Trap if rs >= rt (signed), with a 10-bit software code in bits 6-15.
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2243 
2244 
// Trap if rs >= rt (unsigned), with a 10-bit software code.
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2251 
2252 
// Trap if rs < rt (signed), with a 10-bit software code.
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2259 
2260 
// Trap if rs < rt (unsigned), with a 10-bit software code.
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2268 
2269 
// Trap if rs == rt, with a 10-bit software code.
void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2276 
2277 
// Trap if rs != rt, with a 10-bit software code.
void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2284 
// Emit SYNC (memory barrier) with the default stype of 0.
void Assembler::sync() {
  Instr sync_instr = SPECIAL | SYNC;
  emit(sync_instr);
}
2289 
2290 // Move from HI/LO register.
2291 
// Move from HI special register: rd = HI.
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}
2295 
2296 
// Move from LO special register: rd = LO.
void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}
2300 
2301 
2302 // Set on less than instructions.
// Set on less than (signed): rd = (rs < rt) ? 1 : 0.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}
2306 
2307 
// Set on less than (unsigned): rd = (rs < rt) ? 1 : 0.
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}
2311 
2312 
// Set on less than immediate (signed compare against sign-extended j).
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}
2316 
2317 
// Set on less than immediate (unsigned compare against sign-extended j).
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
2321 
2322 
2323 // Conditional move.
// Conditional move: rd = rs if rt == 0.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}
2327 
2328 
// Conditional move: rd = rs if rt != 0.
void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}
2332 
2333 
// Move if FP condition flag cc is true. The rt field encodes the 3-bit cc
// plus a tf bit of 1 (MOVCI encoding).
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
2339 
2340 
// Move if FP condition flag cc is false (tf bit = 0 in the MOVCI encoding).
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
2346 
2347 
// Single-precision minimum (r6); delegates to the fmt-generic min().
void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}
2351 
2352 
// Double-precision minimum (r6); delegates to the fmt-generic min().
void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}
2356 
2357 
// Single-precision maximum (r6); delegates to the fmt-generic max().
void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}
2361 
2362 
// Double-precision maximum (r6); delegates to the fmt-generic max().
void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}
2366 
2367 
// Single-precision minimum of absolute values (r6).
void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}
2371 
2372 
// Double-precision minimum of absolute values (r6).
void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}
2376 
2377 
// Single-precision maximum of absolute values (r6).
void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}
2381 
2382 
// Double-precision maximum of absolute values (r6).
void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}
2386 
2387 
// r6 FP maximum, generic over format (S or D): fd = max(fs, ft).
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}
2394 
2395 
// r6 FP minimum, generic over format (S or D): fd = min(fs, ft).
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
2402 
2403 
2404 // GPR.
// GPR select (r6): rd = rs if rt == 0, else 0.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
2409 
2410 
2411 // GPR.
// GPR select (r6): rd = rs if rt != 0, else 0.
void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
2416 
2417 
2418 // Bit twiddling.
// Count leading zeros in a word. Pre-r6 and r6 use different encodings.
void Assembler::clz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}
2427 
2428 
// Count leading zeros in a doubleword. Pre-r6 and r6 encodings differ.
void Assembler::dclz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // dclz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
  }
}
2437 
2438 
// Insert bit field: rt[pos+size-1 : pos] = rs[size-1 : 0].
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
2445 
2446 
// Doubleword insert bit field (pos and pos+size-1 both < 32).
void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // Dins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
}
2453 
2454 
// Extract bit field: rt = rs[pos+size-1 : pos], zero-extended.
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
2461 
2462 
// Doubleword extract bit field (size <= 32, pos < 32).
void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dext.
  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}
2469 
2470 
// Doubleword extract, middle variant: field sizes 33-64 (msbd field stores
// size - 1 - 32).
void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextm.
  // Dextm instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
}
2477 
2478 
// Doubleword extract, upper variant: positions 32-63 (lsb field stores
// pos - 32).
void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextu.
  // Dextu instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
}
2485 
2486 
// r6 bit-reverse within each byte of rt, result in rd (BSHFL, sa = 0).
void Assembler::bitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}
2491 
2492 
// r6 doubleword bit-reverse within each byte (DBSHFL, sa = 0).
void Assembler::dbitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
}
2497 
2498 
// Prefetch with a 5-bit hint; the hint occupies the rt field.
void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}
2505 
2506 
// r6 byte-align: concatenate rs:rt and extract a word at byte position bp.
// The ALIGN sub-op and 3-bit bp are packed into the sa field.
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (ALIGN << kBp2Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}
2513 
2514 
// r6 doubleword byte-align; DALIGN sub-op and bp packed into the sa field.
void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (DALIGN << kBp3Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
}
2521 
// Word swap bytes within halfwords: rd = byte-swapped halfwords of rt.
void Assembler::wsbh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
}
2526 
// Doubleword swap bytes within halfwords.
void Assembler::dsbh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSBH, DBSHFL);
}
2531 
// Doubleword swap halfwords within the doubleword.
void Assembler::dshd(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSHD, DBSHFL);
}
2536 
// Sign-extend halfword: rd = sign-extended rt[15:0].
void Assembler::seh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
}
2541 
seb(Register rd,Register rt)2542 void Assembler::seb(Register rd, Register rt) {
2543   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2544   GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
2545 }
2546 
2547 // --------Coprocessor-instructions----------------
2548 
2549 // Load, store, move.
lwc1(FPURegister fd,const MemOperand & src)2550 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2551   if (is_int16(src.offset_)) {
2552     GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
2553   } else {  // Offset > 16 bits, use multiple instructions to load.
2554     LoadRegPlusOffsetToAt(src);
2555     GenInstrImmediate(LWC1, at, fd, 0);
2556   }
2557 }
2558 
2559 
ldc1(FPURegister fd,const MemOperand & src)2560 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
2561   if (is_int16(src.offset_)) {
2562     GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
2563   } else {  // Offset > 16 bits, use multiple instructions to load.
2564     LoadRegPlusOffsetToAt(src);
2565     GenInstrImmediate(LDC1, at, fd, 0);
2566   }
2567 }
2568 
2569 
swc1(FPURegister fd,const MemOperand & src)2570 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
2571   if (is_int16(src.offset_)) {
2572     GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
2573   } else {  // Offset > 16 bits, use multiple instructions to load.
2574     LoadRegPlusOffsetToAt(src);
2575     GenInstrImmediate(SWC1, at, fd, 0);
2576   }
2577 }
2578 
2579 
// Store the double-precision FPU register fd to memory.
// The long-offset path materializes the address into 'at', hence the
// base register must not be 'at'.
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  if (is_int16(src.offset_)) {
    GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(SDC1, at, fd, 0);
  }
}
2589 
2590 
// GPR <-> FPU move instructions. The unused fd field is encoded as f0.

// Move word from GPR rt to FPU register fs.
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


// Move word from GPR rt to the high half of FPU register fs.
void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}


// Move doubleword from GPR rt to FPU register fs.
void Assembler::dmtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}


// Move word from FPU register fs to GPR rt.
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


// Move word from the high half of FPU register fs to GPR rt.
void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}


// Move doubleword from FPU register fs to GPR rt.
void Assembler::dmfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}


// Move GPR rt to FPU control register fs.
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


// Move FPU control register fs to GPR rt.
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}
2629 
2630 
DoubleAsTwoUInt32(double d,uint32_t * lo,uint32_t * hi)2631 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2632   uint64_t i;
2633   memcpy(&i, &d, 8);
2634 
2635   *lo = i & 0xffffffff;
2636   *hi = i >> 32;
2637 }
2638 
2639 
// Emit SEL.fmt (r6-only FPU conditional select); fmt must be S or D.
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));

  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}


// Single-precision wrapper for sel().
void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(S, fd, fs, ft);
}


// Double-precision wrapper for sel().
void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(D, fd, fs, ft);
}
2657 
2658 
2659 // FPR.
seleqz(SecondaryField fmt,FPURegister fd,FPURegister fs,FPURegister ft)2660 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
2661                        FPURegister ft) {
2662   DCHECK((fmt == D) || (fmt == S));
2663   GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
2664 }
2665 
2666 
// Double-precision wrapper for seleqz().
void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(D, fd, fs, ft);
}


// Single-precision wrapper for seleqz().
void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(S, fd, fs, ft);
}


// Double-precision wrapper for selnez() (defined below).
void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(D, fd, fs, ft);
}


// Single-precision wrapper for selnez() (defined below).
void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(S, fd, fs, ft);
}
2685 
2686 
// Pre-r6 FPU conditional moves (r2 only). The movt/movf variants encode
// the 3-bit condition-code 'cc' plus a true/false bit into the ft field.

// MOVZ.S: fd = fs if GPR rt == 0.
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}


// MOVZ.D: fd = fs if GPR rt == 0.
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}


// MOVT.S: fd = fs if FP condition 'cc' is true (tf bit = 1).
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// MOVT.D: fd = fs if FP condition 'cc' is true (tf bit = 1).
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// MOVF.S: fd = fs if FP condition 'cc' is false (tf bit = 0).
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// MOVF.D: fd = fs if FP condition 'cc' is false (tf bit = 0).
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// MOVN.S: fd = fs if GPR rt != 0.
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}


// MOVN.D: fd = fs if GPR rt != 0.
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
2741 
2742 
2743 // FPR.
// FPR.
// Emit SELNEZ.fmt (r6-only FPU select-if-not-equal-zero); fmt is S or D.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
2750 
2751 
2752 // Arithmetic.
2753 
// FPU arithmetic, fd = fs <op> ft. The *_D function-code constants are
// shared between the S and D formats; the fmt field selects precision.

void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
}


void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
}


void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
}


void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
2782 
// Fused multiply-add/subtract, pre-r6 COP1X encodings (r2 only):
// fd = (fs * ft) +/- fr.
void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
}

void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}

void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
}

void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
}
2806 
// r6-only fused multiply-add/subtract into the destination:
// fd = fd +/- (fs * ft).
void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
}

void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
}

void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
}

void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
}
2826 
// FPU divide and single-operand ops; the unused ft field is encoded as f0.

void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
}


void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}


void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
}


void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}


void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


void Assembler::mov_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}


void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}


void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}


void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
}


void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}


// Reciprocal square-root approximation.
void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}


void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}


// Reciprocal approximation.
void Assembler::recip_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}


void Assembler::recip_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
2895 
2896 
2897 // Conversions.
// Conversions to 32-bit integer (word) format, with the rounding mode
// implied by the opcode (cvt = current mode, trunc/round/floor/ceil fixed).
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}
2946 
2947 
// RINT.fmt (r6-only): round fs to an integral FP value in fd using the
// current FCSR rounding mode.
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }


void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }


void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
2958 
2959 
// Conversions to 64-bit integer (long) format. The cvt/trunc variants are
// guarded as r2/r6; round/floor/ceil long variants carry no variant check.
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
3012 
3013 
// CLASS.fmt (r6-only): classify the FP value in fs into fd.
void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}


void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
3024 
3025 
// MINA.fmt / MAXA.fmt (r6-only): min/max by absolute value; fmt is S or D.
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}


void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
3040 
3041 
// Conversions into single/double FP format from word (W), long (L) and
// the other FP format; the L-source variants are r2/r6 only.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
3072 
3073 
3074 // Conditions for >= MIPSr6.
// Conditions for >= MIPSr6.
// Emit CMP.cond.fmt: the condition goes into the function field, the
// result is written to FPU register fd.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}


// NOTE(review): for r6 CMP the single/double comparisons are encoded with
// the W/L fmt field values (not S/D) — confirm against the r6 ISA tables.
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}
3094 
3095 
// BC1EQZ (r6-only): branch by 'offset' instructions if bit 0 of FPU
// register ft is zero. Offset is masked into the 16-bit immediate field.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}


// BC1NEZ (r6-only): branch if bit 0 of FPU register ft is non-zero.
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
3108 
3109 
3110 // Conditions for < MIPSr6.
// Conditions for < MIPSr6.
// Emit C.cond.fmt: compare fs with ft and set FP condition code 'cc'
// (3 bits, encoded at bit 8 together with the fixed '3 << 4' field).
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


// Single-precision wrapper for c().
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}


// Double-precision wrapper for c().
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}
3133 
3134 
// Compare src1 against a constant, which currently must be 0.0 (see the
// DCHECK). Materializes the constant via zero_reg, clobbering f14, then
// issues the pre-r6 compare setting condition code 0.
void Assembler::fcmp(FPURegister src1, const double src2,
      FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}
3142 
3143 
// BC1F (pre-r6): branch if FP condition code 'cc' is false. 'cc' sits at
// bit 18; the tf bit at bit 16 is 0 for the false variant.
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// BC1T (pre-r6): branch if FP condition code 'cc' is true (tf bit = 1).
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
3156 
3157 
// Patch one internal reference at 'pc' after code has moved by 'pc_delta'.
// Handles three encodings: a raw 64-bit address (INTERNAL_REFERENCE),
// a lui/ori/ori address-materialization sequence, and j/jal (including
// "boxed" raw-offset markers emitted before the target was known).
// Returns the number of instructions patched (0 if the slot holds the
// end-of-jump-chain sentinel).
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  if (RelocInfo::IsInternalReference(rmode)) {
    // Raw 64-bit pointer stored in the instruction stream; just shift it.
    int64_t* p = reinterpret_cast<int64_t*>(pc);
    if (*p == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  if (IsLui(instr)) {
    // Address built by lui + ori + (skipped dsll) + ori; the three 16-bit
    // immediates hold bits [47:32], [31:16] and [15:0] of the address.
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK((imm & 3) == 0);  // Targets must stay instruction-aligned.

    // Re-insert the adjusted address into the three immediate fields.
    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm >> 16 & kImm16Mask));
    instr_at_put(pc + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else if (IsJ(instr) || IsJal(instr)) {
    // Regular j/jal relocation.
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    instr &= ~kImm26Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
           ((instr & kJumpRawMask) == kJalRawMark));
    // Unbox raw offset and emit j/jal.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    // Sign extend 28-bit offset to 32-bit.
    imm28 = (imm28 << 4) >> 4;
    uint64_t target =
        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
    target &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
    // Check markings whether to emit j or jal.
    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}
3228 
3229 
// Grow the code buffer: double its size below 1 MB, then grow linearly by
// 1 MB. Instructions are copied to the start of the new buffer, reloc info
// to its end, and all encoded internal references are re-patched by the
// distance the code moved.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());

  // Copy the data.
  // pc_delta: how far the instructions moved; rc_delta: how far the
  // reloc info (which grows downward from the buffer end) moved.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
      (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}
3276 
3277 
// Emit a raw byte of data into the instruction stream (not allowed in a
// branch-delay/forbidden slot).
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


// Emit a raw 32-bit word of data into the instruction stream.
void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


// Emit a raw 64-bit word of data into the instruction stream.
void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
3294 
3295 
// Emit the 64-bit address of a label as data, recording an internal
// reference so it gets fixed up when code moves. Bound labels emit their
// absolute buffer address; unbound labels emit a jump-chain entry and are
// tracked for later patching.
void Assembler::dd(Label* label) {
  uint64_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
3309 
3310 
// Record relocation information for the instruction at the current pc.
// External references are skipped unless serialization/debug code requires
// them; CODE_TARGET_WITH_ID entries additionally carry (and consume) the
// recorded AST id.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::COMMENT &&
      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
3337 
3338 
// Block trampoline-pool emission for the next 'instructions' instructions.
// First gives the pool a chance to be emitted now (before blocking) if it
// would otherwise become due inside the blocked region.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
3343 
3344 
// Emit the trampoline pool if it is due and emission is not blocked.
// The pool is emitted at most once: a jump over the pool, then one
// trampoline slot (j + nop) per branch to a still-unbound label.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // r6 has the compact bc; older variants use b (with delay-slot nop).
      if (kArchVariant == kMips64r6) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
          j(&after_pool);
        }
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
3403 
3404 
target_address_at(Address pc)3405 Address Assembler::target_address_at(Address pc) {
3406   Instr instr0 = instr_at(pc);
3407   Instr instr1 = instr_at(pc + 1 * kInstrSize);
3408   Instr instr3 = instr_at(pc + 3 * kInstrSize);
3409 
3410   // Interpret 4 instructions for address generated by li: See listing in
3411   // Assembler::set_target_address_at() just below.
3412   if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
3413       (GetOpcodeField(instr3) == ORI)) {
3414     // Assemble the 48 bit value.
3415      int64_t addr  = static_cast<int64_t>(
3416           ((uint64_t)(GetImmediate16(instr0)) << 32) |
3417           ((uint64_t)(GetImmediate16(instr1)) << 16) |
3418           ((uint64_t)(GetImmediate16(instr3))));
3419 
3420     // Sign extend to get canonical address.
3421     addr = (addr << 16) >> 16;
3422     return reinterpret_cast<Address>(addr);
3423   }
3424   // We should never get here, force a bad address if we do.
3425   UNREACHABLE();
3426   return (Address)0x0;
3427 }
3428 
3429 
3430 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
3431 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
3432 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
3433 // OS::nan_value() returns a qNaN.
QuietNaN(HeapObject * object)3434 void Assembler::QuietNaN(HeapObject* object) {
3435   HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
3436 }
3437 
3438 
// On Mips64, a target address is stored in a 4-instruction sequence:
//    0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
//    1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
//    2: dsll(rd, rd, 16);
//    3: ori(rd, rd, j.imm32_ & kImm16Mask);
//
// Patching the address must replace all the lui & ori instructions,
// and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
// in code on MIPS64 because only 48-bits of address is effectively used.
// It relies on fact the upper [63:48] bits are not used for virtual address
// translation and they have to be set according to value of bit 47 in order
// get canonical address.
  // Extract the destination register from the second instruction of the
  // sequence; all four instructions of the li sequence target the same rd.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint64_t itarget = reinterpret_cast<uint64_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
         GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to ensure patchable code.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  // dsll rt, rt, 16.   <- register-only; left in place, not rewritten below.
  // ori rt rt, lower-16.
  // Note: the rs/rt field order differs between the two ORI writes below;
  // the fields are OR-ed together, so the resulting encodings are identical.
  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
      | ((itarget >> 16) & kImm16Mask);
  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
      | (itarget & kImm16Mask);

  // Flush the patched words from the i-cache unless the caller explicitly
  // opted out (e.g. during deserialization, where a bulk flush follows).
  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
  }
}
3487 
3488 }  // namespace internal
3489 }  // namespace v8
3490 
3491 #endif  // V8_TARGET_ARCH_MIPS64
3492