1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license above has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 #include "src/mips64/assembler-mips64.h"
36 
37 #if V8_TARGET_ARCH_MIPS64
38 
39 #include "src/base/cpu.h"
40 #include "src/mips64/assembler-mips64-inl.h"
41 
42 namespace v8 {
43 namespace internal {
44 
45 
46 // Get the CPU features enabled by the build. For cross compilation the
47 // preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
48 // can be defined to enable FPU instructions when building the
49 // snapshot.
// Returns the feature bit set that the build configuration guarantees, so it
// is safe to rely on even when generating a snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;  // FPU explicitly requested via build define.
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;  // Hard-float target: FPU is guaranteed present.
#endif

  return answer;
}
65 
66 
// Populates supported_ with the CPU features available to generated code,
// combining compile-time guarantees with (for native builds) runtime probing.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}
84 
85 
// No target-specific information to print on MIPS64.
void CpuFeatures::PrintTarget() { }
// No feature-specific information to print on MIPS64.
void CpuFeatures::PrintFeatures() { }
88 
89 
// Returns the hardware encoding number for |reg|. On MIPS64 the allocation
// codes coincide with the hardware numbers, so this table is the identity
// mapping; it is kept explicit to document the register name for each slot.
int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // a4
    9,    // a5
    10,   // a6
    11,   // a7
    12,   // t0
    13,   // t1
    14,   // t2
    15,   // t3
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}
128 
129 
// Returns the Register whose hardware encoding is |num|; inverse of
// ToNumber() above.
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3, a4, a5, a6, a7,
    t0, t1, t2, t3,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}
148 
149 
150 // -----------------------------------------------------------------------------
151 // Implementation of RelocInfo.
152 
// Relocation modes whose recorded targets must be patched when the code
// object moves.
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
156 
157 
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}
164 
165 
IsInConstantPool()166 bool RelocInfo::IsInConstantPool() {
167   return false;
168 }
169 
170 
171 // -----------------------------------------------------------------------------
172 // Implementation of Operand and MemOperand.
173 // See assembler-mips-inl.h for inlined constructors.
174 
// Builds an immediate operand from a handle. Heap objects are embedded via
// their handle location (so GC can update them through relocation info);
// smis are embedded directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm64_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm64_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}
190 
191 
// Memory operand: base register |rm| plus a signed byte |offset|.
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}
195 
196 
// Memory operand with a scaled offset: base |rm| plus
// unit * multiplier + offset_addend bytes.
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
202 
203 
204 // -----------------------------------------------------------------------------
205 // Specific instructions, constants, and masks.
206 
static const int kNegOffset = 0x00008000;  // Sign bit of a 16-bit offset.
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                              (Register::kCode_sp << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                               (Register::kCode_sp << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT
//  ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw/sw of a register at a non-negative fp-relative offset.
const Instr kLwRegFpOffsetPattern =
    LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern =
    SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw/sw of a register at a negative fp-relative offset (sign bit set).
const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
// Masks separating the opcode/rs/rt fields from the immediate argument.
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask  = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
240 
241 
// Sets up an Assembler over |buffer| (owned by AssemblerBase) and resets all
// trampoline-pool and label bookkeeping to its initial state.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  // Relocation info grows downward from the end of the buffer.
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}
264 
265 
// Finalizes assembly and fills |desc| with the buffer layout: instructions
// grow up from buffer_, relocation info grows down from buffer_ + size.
void Assembler::GetCode(CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
  desc->origin = this;
  desc->constant_pool_size = 0;
}
278 
279 
// Pads the instruction stream with nops until pc_offset() is a multiple of
// |m| (a power of two, at least one instruction wide).
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
287 
288 
void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}
294 
295 
// Decodes the rt register operand of |instr|.
Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


// Decodes the rs register operand of |instr|.
Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


// Decodes the rd register operand of |instr|.
Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}
315 
316 
// Field accessors. GetX() returns the field shifted down to bit 0;
// GetXField() returns it still in place within the instruction word.

uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return  (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return  instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


// Everything but the 16-bit immediate; zero for emitted label constants.
uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}
380 
381 
// Pattern matchers against the push/pop and fp-relative lw/sw templates
// defined above; the rt register field is masked out where it may vary.

bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


// Negative-offset variants additionally check the offset's sign bit.
bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}
412 
413 
414 // Labels refer to positions in the (to be) generated code.
415 // There are bound, linked, and unused labels.
416 //
417 // Bound labels refer to known positions in the already
418 // generated code. pos() is the position the label refers to.
419 //
420 // Linked labels refer to unknown positions in the code
421 // to be generated; pos() is the position of the last
422 // instruction using the label.
423 
424 // The link chain is terminated by a value in the instruction of -1,
425 // which is an otherwise illegal value (branch -1 is inf loop).
426 // The instruction 16-bit offset field addresses 32-bit words, but in
427 // code is conv to an 18-bit value addressing bytes, hence the -4 value.
428 
// Sentinel marking the end of a label link chain (see comment above on the
// -4 encoding).
const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
432 
433 
// Returns true if |instr| is any branch instruction (conditional branches,
// branch-likely forms, coprocessor branches, and on r6 the compact branches).
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}
457 
458 
bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}
464 
465 
IsBzc(Instr instr)466 bool Assembler::IsBzc(Instr instr) {
467   uint32_t opcode = GetOpcodeField(instr);
468   // Checks if the instruction is BEQZC or BNEZC.
469   return (opcode == POP66 && GetRsField(instr) != 0) ||
470          (opcode == POP76 && GetRsField(instr) != 0);
471 }
472 
473 
// Returns true if |instr| is a raw label constant (everything outside the
// 16-bit immediate is zero), as emitted by the reg-exp engine.
bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}
478 
479 
bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


// BEQZC: POP66 opcode with nonzero rs (rs == 0 encodes a different op).
bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}


// BNEZC: POP76 opcode with nonzero rs.
bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}


// BEQC: POP10 with 0 != rs < rt (rs >= rt encodes BOVC/BEQZALC instead).
bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}


// BNEC: POP30 with 0 != rs < rt (rs >= rt encodes BNVC/BNEZALC instead).
bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}
516 
517 
// Returns true if |instr| is a jump: J, JAL, or the register forms
// JR/JALR (SPECIAL opcode with the appropriate function field).
bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}
528 
529 
bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}
564 
565 
// Returns true if |instr| is a nop of the given marker |type| (0..31).
bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
588 
589 
// Decodes the branch offset in bytes: the 16-bit immediate is sign-extended
// and scaled by the 4-byte instruction size.
int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
594 
595 
bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}


// Returns the raw (unscaled) 16-bit offset of an lw instruction.
int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


// Returns a copy of the lw in |instr| with its offset replaced by |offset|.
Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}


// Returns a copy of the sw in |instr| with its offset replaced by |offset|.
Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}


// Returns a copy of the addiu/daddiu in |instr| with immediate |offset|.
Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}
643 
644 
// Returns the width of the branch offset field of |instr|: 26 bits for
// r6 compact BC/BALC, 21 bits for r6 BEQZC/BNEZC, otherwise 16 bits.
static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (kArchVariant == kMips64r6) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}
655 
656 
// Decodes the (sign-extended, byte-scaled) branch offset of |instr| and adds
// it to |pos|, yielding the link target; returns kEndOfChain for the
// chain-terminating sentinel.
static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
673 
674 
target_at(int pos,bool is_internal)675 int Assembler::target_at(int pos, bool is_internal) {
676   if (is_internal) {
677     int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
678     int64_t address = *p;
679     if (address == kEndOfJumpChain) {
680       return kEndOfChain;
681     } else {
682       int64_t instr_address = reinterpret_cast<int64_t>(p);
683       DCHECK(instr_address - address < INT_MAX);
684       int delta = static_cast<int>(instr_address - address);
685       DCHECK(pos > delta);
686       return pos - delta;
687     }
688   }
689   Instr instr = instr_at(pos);
690   if ((instr & ~kImm16Mask) == 0) {
691     // Emitted label constant, not part of a branch.
692     if (instr == 0) {
693        return kEndOfChain;
694      } else {
695        int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
696        return (imm18 + pos);
697      }
698   }
699   // Check we have a branch or jump instruction.
700   DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
701   // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
702   // the compiler uses arithmetic shifts for signed integers.
703   if (IsBranch(instr)) {
704     return AddBranchOffset(pos, instr);
705   } else if (IsLui(instr)) {
706     Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
707     Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
708     Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
709     DCHECK(IsOri(instr_ori));
710     DCHECK(IsOri(instr_ori2));
711 
712     // TODO(plind) create named constants for shift values.
713     int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
714     imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
715     imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
716     // Sign extend address;
717     imm >>= 16;
718 
719     if (imm == kEndOfJumpChain) {
720       // EndOfChain sentinel is returned directly, not relative to pc or pos.
721       return kEndOfChain;
722     } else {
723       uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
724       DCHECK(instr_address - imm < INT_MAX);
725       int delta = static_cast<int>(instr_address - imm);
726       DCHECK(pos > delta);
727       return pos - delta;
728     }
729   } else {
730     DCHECK(IsJ(instr) || IsJal(instr));
731     int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
732     if (imm28 == kEndOfJumpChain) {
733       // EndOfChain sentinel is returned directly, not relative to pc or pos.
734       return kEndOfChain;
735     } else {
736       // Sign extend 28-bit offset.
737       int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
738       return pos + delta;
739     }
740   }
741 }
742 
743 
// Re-encodes |instr| so its branch offset field targets |target_pos| when
// the instruction sits at |pos|; the offset must fit the field width.
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK((imm & 3) == 0);  // Targets are instruction-aligned.
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
757 
758 
// Patches the link at |pos| so it resolves to |target_pos|. The inverse of
// target_at(): handles internal references (absolute addresses), emitted
// label constants, branches, lui/ori sequences, and j/jal instructions.
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    // Store the absolute target address directly in the instruction stream.
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    // Rewrite the 48-bit address held by the lui/ori/.../ori sequence.
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);  // Targets are instruction-aligned.

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pos + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}
819 
820 
// Debug helper: prints the state of label |L| and, for a linked label, walks
// its entire link chain printing each entry.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;  // Walk a copy so |L| itself is left untouched.
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, internal_reference_positions_.find(l.pos()) !=
                   internal_reference_positions_.end());
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
844 
845 
// Binds label |L| to buffer offset |pos|, patching every instruction in its
// link chain to target |pos|. Branches whose target is out of range are
// redirected through a trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    // One fewer unbound label: relax the trampoline-pool check threshold.
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = internal_reference_positions_.find(fixup_pos) !=
                  internal_reference_positions_.end();
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          // Too far for a direct branch: route through a trampoline slot.
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK(trampoline_pos != kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
          dist = pos - fixup_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
893 
894 
// Binds |L| to the current end of the instruction stream.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}
899 
900 
// Advances linked label |L| to the previous entry in its link chain, or
// marks it unused when the chain ends.
void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
911 
912 
// Returns true when the bound label L is close enough to the current
// position for a standard branch, with a margin of four instructions
// kept in reserve.
bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}
917 
918 
is_near(Label * L,OffsetSize bits)919 bool Assembler::is_near(Label* L, OffsetSize bits) {
920   if (L == nullptr || !L->is_bound()) return true;
921   return ((pc_offset() - L->pos()) <
922           (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
923 }
924 
925 
// Returns true when L is reachable by a branch; dispatches on the
// architecture variant since R6 branches have different reach.
bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}
930 
931 
BranchOffset(Instr instr)932 int Assembler::BranchOffset(Instr instr) {
933   // At pre-R6 and for other R6 branches the offset is 16 bits.
934   int bits = OffsetSize::kOffset16;
935 
936   if (kArchVariant == kMips64r6) {
937     uint32_t opcode = GetOpcodeField(instr);
938     switch (opcode) {
939       // Checks BC or BALC.
940       case BC:
941       case BALC:
942         bits = OffsetSize::kOffset26;
943         break;
944 
945       // Checks BEQZC or BNEZC.
946       case POP66:
947       case POP76:
948         if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
949         break;
950       default:
951         break;
952     }
953   }
954 
955   return (1 << (bits + 2 - 1)) - 1;
956 }
957 
958 
959 // We have to use a temporary register for things that can be relocated even
960 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
961 // space.  There is no guarantee that the relocated location can be similarly
962 // encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  // Any value with relocation information must be materialized through a
  // register; see the rationale in the comment above this function.
  return !RelocInfo::IsNone(rmode);
}
966 
// Emits a register-type instruction laid out as
// opcode | rs | rt | rd | sa | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
978 
979 
// Emits a register-type instruction whose msb/lsb fields occupy the
// rd and sa slots of the encoding.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}
991 
992 
// Emits a three-operand FPU instruction:
// opcode | fmt | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}
1004 
1005 
// Emits a four-operand FPU instruction:
// opcode | fr | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1017 
1018 
// Emits an FPU instruction taking a GPR rt together with FPU fs/fd:
// opcode | fmt | rt | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1030 
1031 
// Emits an instruction accessing an FPU control register:
// opcode | fmt | rt | fs | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}
1042 
1043 
1044 // Instructions with immediate value.
1045 // Registers are in the order of the instruction encoding, from left to right.
// Emits opcode | rs | rt | imm16. The immediate may be either signed or
// unsigned 16-bit; is_compact_branch is forwarded to emit().
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1054 
1055 
// Emits opcode | rs | SF | imm16, where SF fills the rt field
// (e.g. REGIMM-class branches).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1063 
1064 
// Emits opcode | rs | ft | imm16 (FPU register in the ft slot,
// e.g. FPU load/store).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1073 
1074 
// Emits opcode | rs | offset21 with a signed 21-bit offset
// (R6 compact branches such as BEQZC/BNEZC).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}
1081 
1082 
// Emits opcode | rs | offset21 with an unsigned 21-bit offset.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}
1089 
1090 
// Emits opcode | offset26 with a signed 26-bit offset (R6 BC/BALC).
void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}
1097 
1098 
// Emits a jump-type instruction (opcode | 26-bit word address). The
// trampoline pool is blocked so the jump and its delay slot stay together.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1107 
1108 
1109 // Returns the next free trampoline entry.
get_trampoline_entry(int32_t pos)1110 int32_t Assembler::get_trampoline_entry(int32_t pos) {
1111   int32_t trampoline_entry = kInvalidSlotPos;
1112   if (!internal_trampoline_exception_) {
1113     if (trampoline_.start() > pos) {
1114      trampoline_entry = trampoline_.take_slot();
1115     }
1116 
1117     if (kInvalidSlotPos == trampoline_entry) {
1118       internal_trampoline_exception_ = true;
1119     }
1120   }
1121   return trampoline_entry;
1122 }
1123 
1124 
jump_address(Label * L)1125 uint64_t Assembler::jump_address(Label* L) {
1126   int64_t target_pos;
1127   if (L->is_bound()) {
1128     target_pos = L->pos();
1129   } else {
1130     if (L->is_linked()) {
1131       target_pos = L->pos();  // L's link.
1132       L->link_to(pc_offset());
1133     } else {
1134       L->link_to(pc_offset());
1135       return kEndOfJumpChain;
1136     }
1137   }
1138   uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
1139   DCHECK((imm & 3) == 0);
1140 
1141   return imm;
1142 }
1143 
1144 
jump_offset(Label * L)1145 uint64_t Assembler::jump_offset(Label* L) {
1146   int64_t target_pos;
1147   int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1148 
1149   if (L->is_bound()) {
1150     target_pos = L->pos();
1151   } else {
1152     if (L->is_linked()) {
1153       target_pos = L->pos();  // L's link.
1154       L->link_to(pc_offset() + pad);
1155     } else {
1156       L->link_to(pc_offset() + pad);
1157       return kEndOfJumpChain;
1158     }
1159   }
1160   int64_t imm = target_pos - (pc_offset() + pad);
1161   DCHECK((imm & 3) == 0);
1162 
1163   return static_cast<uint64_t>(imm);
1164 }
1165 
1166 
branch_offset_helper(Label * L,OffsetSize bits)1167 int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
1168   int32_t target_pos;
1169   int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1170 
1171   if (L->is_bound()) {
1172     target_pos = L->pos();
1173   } else {
1174     if (L->is_linked()) {
1175       target_pos = L->pos();
1176       L->link_to(pc_offset() + pad);
1177     } else {
1178       L->link_to(pc_offset() + pad);
1179       if (!trampoline_emitted_) {
1180         unbound_labels_count_++;
1181         next_buffer_check_ -= kTrampolineSlotsSize;
1182       }
1183       return kEndOfChain;
1184     }
1185   }
1186 
1187   int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
1188   DCHECK(is_intn(offset, bits + 2));
1189   DCHECK((offset & 3) == 0);
1190 
1191   return offset;
1192 }
1193 
1194 
label_at_put(Label * L,int at_offset)1195 void Assembler::label_at_put(Label* L, int at_offset) {
1196   int target_pos;
1197   if (L->is_bound()) {
1198     target_pos = L->pos();
1199     instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1200   } else {
1201     if (L->is_linked()) {
1202       target_pos = L->pos();  // L's link.
1203       int32_t imm18 = target_pos - at_offset;
1204       DCHECK((imm18 & 3) == 0);
1205       int32_t imm16 = imm18 >> 2;
1206       DCHECK(is_int16(imm16));
1207       instr_at_put(at_offset, (imm16 & kImm16Mask));
1208     } else {
1209       target_pos = kEndOfChain;
1210       instr_at_put(at_offset, 0);
1211       if (!trampoline_emitted_) {
1212         unbound_labels_count_++;
1213         next_buffer_check_ -= kTrampolineSlotsSize;
1214       }
1215     }
1216     L->link_to(at_offset);
1217   }
1218 }
1219 
1220 
1221 //------- Branch and jump instructions --------
1222 
// Unconditional branch, encoded as beq zero_reg, zero_reg.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}
1226 
1227 
// Unconditional branch-and-link, encoded as bgezal zero_reg.
void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}
1232 
1233 
// R6 compact branch (26-bit offset, no delay slot).
void Assembler::bc(int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
1238 
1239 
// R6 compact branch-and-link (26-bit offset, no delay slot).
void Assembler::balc(int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
1245 
1246 
// Branch on equal. Trampoline pool is blocked so the branch and its delay
// slot stay contiguous.
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1252 
1253 
// Branch on greater-than-or-equal to zero (REGIMM encoding).
void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1259 
1260 
// R6 compact bgezc, encoded as BLEZL with rs == rt != 0.
void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1266 
1267 
// R6 compact unsigned bgeuc, encoded as BLEZ with distinct nonzero rs/rt.
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1275 
1276 
// R6 compact signed bgec, encoded as BLEZL with distinct nonzero rs/rt.
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1284 
1285 
// Branch-and-link on rs >= 0. On R6 only the rs == zero_reg form (bal)
// remains valid.
void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1293 
1294 
// Branch on greater-than zero.
void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1300 
1301 
// R6 compact bgtzc, encoded as BGTZL with rs == 0, rt != 0.
void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1308 
1309 
// Branch on less-than-or-equal to zero.
void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1315 
1316 
// R6 compact blezc, encoded as BLEZL with rs == 0, rt != 0.
void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1323 
1324 
// R6 compact bltzc, encoded as BGTZL with rs == rt != 0.
void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rt.is(zero_reg));
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1330 
1331 
// R6 compact unsigned bltuc, encoded as BGTZ with distinct nonzero rs/rt.
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1339 
1340 
// R6 compact signed bltc, encoded as BGTZL with distinct nonzero rs/rt.
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rs.is(zero_reg));
  DCHECK(!rt.is(zero_reg));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1348 
1349 
// Branch on less-than zero (REGIMM encoding).
void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1355 
1356 
// Branch-and-link on rs < 0. On R6 only the rs == zero_reg form remains.
void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1364 
1365 
// Branch on not-equal.
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1371 
1372 
// R6 compact branch on overflow, reusing the ADDI opcode slot; requires
// rs nonzero and rs.code() >= rt.code().
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1379 
1380 
// R6 compact branch on no-overflow, reusing the DADDI opcode slot;
// requires rs nonzero and rs.code() >= rt.code().
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1387 
1388 
// R6 compact branch-and-link on rt <= 0 (BLEZ with rs == 0, rt != 0).
void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1396 
1397 
// R6 compact branch-and-link on rt >= 0 (BLEZ with rs == rt != 0).
void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1404 
1405 
// Branch-likely-and-link on rs >= 0 (pre-R6 only; removed in R6).
void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1414 
1415 
// R6 compact branch-and-link on rt < 0 (BGTZ with rs == rt != 0).
void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1422 
1423 
// R6 compact branch-and-link on rt > 0 (BGTZ with rs == 0, rt != 0).
void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1431 
1432 
// R6 compact branch-and-link on rt == 0 (ADDI slot with rs == 0, rt != 0).
void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1440 
1441 
// R6 compact branch-and-link on rt != 0 (DADDI slot with rs == 0, rt != 0).
void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1449 
1450 
beqc(Register rs,Register rt,int16_t offset)1451 void Assembler::beqc(Register rs, Register rt, int16_t offset) {
1452   DCHECK(kArchVariant == kMips64r6);
1453   DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1454   if (rs.code() < rt.code()) {
1455     GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1456   } else {
1457     GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1458   }
1459 }
1460 
1461 
// R6 compact branch on rs == 0 (POP66, 21-bit offset; rs must be nonzero
// to distinguish from JIC).
void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1467 
1468 
bnec(Register rs,Register rt,int16_t offset)1469 void Assembler::bnec(Register rs, Register rt, int16_t offset) {
1470   DCHECK(kArchVariant == kMips64r6);
1471   DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1472   if (rs.code() < rt.code()) {
1473     GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1474   } else {
1475     GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1476   }
1477 }
1478 
1479 
// R6 compact branch on rs != 0 (POP76, 21-bit offset; rs must be nonzero
// to distinguish from JIALC).
void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1485 
1486 
// Jump to an absolute target; the target's 26-bit word address fills the
// J-type immediate field.
void Assembler::j(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1492 
1493 
// Jump to a label. A bound label is emitted with the kJRawMark opcode
// marker (NOTE(review): presumably so later passes can recognize the raw
// jump — see the opcode's declaration); an unbound label falls back to the
// absolute form, which links the chain.
void Assembler::j(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrJump(static_cast<Opcode>(kJRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    j(imm);
  }
}
1505 
1506 
// Jump-and-link to a label; mirrors j(Label*) but uses kJalRawMark and
// records source positions for the call.
void Assembler::jal(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    positions_recorder()->WriteRecordedPositions();
    GenInstrJump(static_cast<Opcode>(kJalRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jal(imm);
  }
}
1519 
1520 
jr(Register rs)1521 void Assembler::jr(Register rs) {
1522   if (kArchVariant != kMips64r6) {
1523     BlockTrampolinePoolScope block_trampoline_pool(this);
1524     if (rs.is(ra)) {
1525       positions_recorder()->WriteRecordedPositions();
1526     }
1527     GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1528     BlockTrampolinePoolFor(1);  // For associated delay slot.
1529   } else {
1530     jalr(rs, zero_reg);
1531   }
1532 }
1533 
1534 
// Jump-and-link to an absolute target (26-bit word address).
void Assembler::jal(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1541 
1542 
// Jump through rs, storing the return address in rd; rd must differ
// from rs.
void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1550 
1551 
// R6 jump indexed compact: jump to rt + offset (POP66 with rs == 0).
void Assembler::jic(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(POP66, zero_reg, rt, offset);
}
1556 
1557 
// R6 jump-and-link indexed compact (POP76 with rs == 0).
void Assembler::jialc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(POP76, zero_reg, rt, offset);
}
1563 
1564 
1565 // -------Data-processing-instructions---------
1566 
1567 // Arithmetic.
1568 
// rd = rs + rt (32-bit add, no overflow trap).
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
1572 
1573 
// rd = rs + imm16 (32-bit add immediate, no overflow trap).
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}
1577 
1578 
// rd = rs - rt (32-bit subtract, no overflow trap).
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
1582 
1583 
mul(Register rd,Register rs,Register rt)1584 void Assembler::mul(Register rd, Register rs, Register rt) {
1585   if (kArchVariant == kMips64r6) {
1586       GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1587   } else {
1588       GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1589   }
1590 }
1591 
1592 
// rd = high word of rs * rt (signed, R6 only).
void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}
1597 
1598 
// rd = low word of rs * rt (unsigned, R6 only).
void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}
1603 
1604 
// rd = high word of rs * rt (unsigned, R6 only).
void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}
1609 
1610 
// rd = low doubleword of rs * rt (signed 64-bit, R6 only).
void Assembler::dmul(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}
1615 
1616 
// rd = high doubleword of rs * rt (signed 64-bit, R6 only).
void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}
1621 
1622 
// rd = low doubleword of rs * rt (unsigned 64-bit, R6 only).
void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}
1627 
1628 
// rd = high doubleword of rs * rt (unsigned 64-bit, R6 only).
void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}
1633 
1634 
// HI/LO = rs * rt (signed, pre-R6 only).
void Assembler::mult(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
1639 
1640 
// HI/LO = rs * rt (unsigned, pre-R6 only).
void Assembler::multu(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
1645 
1646 
// rd = rs + imm16 (64-bit add immediate, no overflow trap).
void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}
1650 
1651 
// LO = rs / rt, HI = rs % rt (signed 32-bit, HI/LO form).
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}
1655 
1656 
// rd = rs / rt (signed 32-bit, R6 three-operand form).
void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}
1661 
1662 
// rd = rs % rt (signed 32-bit, R6 only).
void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
1667 
1668 
// LO = rs / rt, HI = rs % rt (unsigned 32-bit, HI/LO form).
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
1672 
1673 
// rd = rs / rt (unsigned 32-bit, R6 three-operand form).
void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
1678 
1679 
// rd = rs % rt (unsigned 32-bit, R6 only).
void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
1684 
1685 
// rd = rs + rt (64-bit add, no overflow trap).
void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}
1689 
1690 
// rd = rs - rt (64-bit subtract, no overflow trap).
void Assembler::dsubu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}
1694 
1695 
// HI/LO = rs * rt (signed 64-bit, HI/LO form).
void Assembler::dmult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}
1699 
1700 
// HI/LO = rs * rt (unsigned 64-bit, HI/LO form).
void Assembler::dmultu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}
1704 
1705 
// LO = rs / rt, HI = rs % rt (signed 64-bit, HI/LO form).
void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}
1709 
1710 
// rd = rs / rt (signed 64-bit, R6 three-operand form).
void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}
1715 
1716 
// rd = rs % rt (signed 64-bit, R6 only).
void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}
1721 
1722 
// LO = rs / rt, HI = rs % rt (unsigned 64-bit, HI/LO form).
void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}
1726 
1727 
ddivu(Register rd,Register rs,Register rt)1728 void Assembler::ddivu(Register rd, Register rs, Register rt) {
1729   DCHECK(kArchVariant == kMips64r6);
1730   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
1731 }
1732 
1733 
dmodu(Register rd,Register rs,Register rt)1734 void Assembler::dmodu(Register rd, Register rs, Register rt) {
1735   DCHECK(kArchVariant == kMips64r6);
1736   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
1737 }
1738 
1739 
1740 // Logical.
1741 
// Bitwise AND/OR/XOR/NOR, three-register forms, plus immediate variants.
// The immediate forms take a 16-bit value (enforced by is_uint16) which the
// hardware zero-extends.  Trailing underscores avoid clashing with C++
// keywords/operators (and, or, xor).
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


// NOR with rt == zero_reg is the canonical bitwise-NOT idiom; callers
// choose the operands, this just emits the encoding.
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
1778 
1779 
1780 // Shifts.
// Shift left logical by a 5-bit constant (sa is masked to 0x1F).
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}


// Variable shifts: the shift amount comes from register rs.
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


// Rotate right: encoded as SRL with a 1 in the rs field (the rotate bit),
// hence the manual instruction assembly instead of GenInstrRegister.
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


// Variable rotate right: SRLV with a 1 in the sa field as the rotate bit.
void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
     | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}
1837 
1838 
// 64-bit (doubleword) shifts.  Constant-shift forms mask sa to 5 bits; the
// *32 variants below cover shift amounts of 32..63 (hardware adds 32).
void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
}


void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}


void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
}


void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}


// 64-bit rotate right: DSRL with a 1 in the rs field as the rotate bit.
void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
  emit(instr);
}


// 64-bit variable rotate right: DSRLV with a 1 in the sa field.
void Assembler::drotrv(Register rd, Register rt, Register rs) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
  emit(instr);
}


void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
}


void Assembler::dsrav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}


void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
}


void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
}


void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
}
1898 
1899 
// r6-only load-scaled-address: rd = (rt << sa) + rs.  The caller passes the
// logical shift amount sa in 1..4 (checked below); the encoding stores
// sa - 1 in the 2-bit sa field.
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa < 5 && sa > 0);
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
  emit(instr);
}


// 64-bit variant of lsa; same sa-minus-one encoding.
void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa < 5 && sa > 0);
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa - 1) << kSaShift | DLSA;
  emit(instr);
}
1918 
1919 
1920 // ------------Memory-instructions-------------
1921 
1922 // Helper for base-reg + offset, when offset is larger than int16.
// Helper for base-reg + offset, when offset is larger than int16.
// Materializes base + 32-bit offset into the 'at' scratch register:
// builds the upper 16 offset bits via daddiu+dsll (sign-extending the
// upper half), ors in the low 16 bits, then adds the base register.
// Clobbers 'at', so the base itself must not be 'at' (checked below).
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  DCHECK(is_int32(src.offset_));
  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
  dsll(at, at, kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  daddu(at, at, src.rm());  // Add base register.
}
1931 
1932 
// Memory loads (byte/halfword/word, signed and unsigned).  Each emits a
// single instruction when the offset fits in a signed 16-bit immediate;
// otherwise the address is materialized into 'at' first (clobbering 'at').
void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


void Assembler::lwu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
  }
}


// Unaligned-access halves (load word left/right).  These take the raw
// 16-bit offset directly; no large-offset expansion is provided here.
void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
2001 
2002 
// Memory stores (byte/halfword/word).  Mirrors the load helpers above:
// one instruction for int16 offsets, otherwise the address goes via 'at'.
void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


// Unaligned-access store halves (store word left/right); raw offset only.
void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
2041 
2042 
// Load upper immediate: rd gets the 16-bit value in the upper halfword.
void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}


// Doubleword add-upper-immediate; rs must not be zero_reg (that encoding
// would collide with another instruction form).
void Assembler::daui(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  DCHECK(!rs.is(zero_reg));
  GenInstrImmediate(DAUI, rs, rt, j);
}


// Add-immediate to bits 47..32 (dahi) / 63..48 (dati) of rs, encoded in
// the REGIMM opcode space.
void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}


void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}
2074 
2075 
// Doubleword unaligned-access halves (load/store doubleword left/right);
// these take the raw offset with no large-offset expansion.
void Assembler::ldl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}


void Assembler::ldr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}


void Assembler::sdl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}


void Assembler::sdr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}


// Doubleword load/store with the usual int16-offset fast path and the
// 'at'-based expansion for larger offsets.
void Assembler::ld(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LD, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


void Assembler::sd(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SD, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}
2114 
2115 
2116 // ---------PC-Relative instructions-----------
2117 
// r6-only PC-relative instructions.  All share the PCREL opcode; the
// sub-operation code is packed into the top bits of the 21-bit immediate
// field alongside the masked 19-bit (or 18-/16-bit) offset.
void Assembler::addiupc(Register rs, int32_t imm19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(imm19));
  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


void Assembler::lwpc(Register rs, int32_t offset19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


void Assembler::lwupc(Register rs, int32_t offset19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// ldpc uses an 18-bit offset (doubleword granularity leaves fewer bits).
void Assembler::ldpc(Register rs, int32_t offset18) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int18(offset18));
  uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


void Assembler::auipc(Register rs, int16_t imm16) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


void Assembler::aluipc(Register rs, int16_t imm16) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2164 
2165 
2166 // -------------Misc-instructions--------------
2167 
2168 // Break / Trap instructions.
// Break / Trap instructions.
// Emits a BREAK with a 20-bit code.  Codes in the stop range are reserved
// for stop(); the DCHECK keeps plain breaks and stops disjoint.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


// On hardware, stop() degrades to a plain break; under the simulator it
// emits break_(code) followed by the message pointer, which the simulator
// reads (hence the trampoline-pool block to keep the pair contiguous).
void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);
  DCHECK(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(3);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<uint64_t>(msg));
#endif
}
2198 
2199 
// Conditional trap instructions (tge/tgeu/tlt/tltu/teq/tne).  Each packs a
// 10-bit software code into bits 15..6 of the encoding; the comparison is
// selected by the SPECIAL-function constant.
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
2247 
2248 
2249 // Move from HI/LO register.
2250 
// Move from the HI/LO accumulator registers into a GPR (pre-r6 multiply/
// divide results).  Only the rd field is meaningful; rs/rt are zero_reg.
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


// Immediate forms: note no range DCHECK here; the 16-bit immediate is
// truncated by the encoder.
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
2280 
2281 
2282 // Conditional move.
// Conditional moves on a GPR condition (rd = rs if rt ==/!= 0).
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


// Conditional moves on an FPU condition flag.  The MOVCI encoding reuses
// the rt field to hold the 3-bit condition code (bits 4..2) and the
// true/false selector in bit 0, built here via a synthetic Register.
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
2305 
2306 
// FPU min/max family.  The _s/_d wrappers pin the format (single/double);
// mina/maxa (absolute-value compare variants) are defined elsewhere in
// this file, min/max are the r6-only generic emitters below.
void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}


void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}


void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}


void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}


void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}


void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}


void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}


void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}


// r6-only; fmt must be S or D.
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}


void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
2361 
2362 
2363 // GPR.
// r6-only GPR select: rd = rs if rt == 0 else 0 (and the != 0 dual below).
// The SELEQZ_S/SELNEZ_S function constants are shared with the FPU forms.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}


// GPR.
void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}


// Bit twiddling.
// Count leading zeros: pre-r6 uses the SPECIAL2 encoding (which requires
// rt == rd), r6 uses the SPECIAL encoding with sa == 1.
void Assembler::clz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}


// 64-bit count leading zeros; same variant split as clz.
void Assembler::dclz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // dclz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
  }
}
2396 
2397 
// Bit-field insert/extract family (SPECIAL3 opcode space).  The hardware
// fields are msb and lsb, computed here from the caller's (pos, size);
// the m/u variants re-bias msb/lsb by 32 to reach the upper doubleword.
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
}


void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dext.
  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}


// Dextm: for fields wider than 32 bits; encodes msbd = size - 1 - 32.
void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextm.
  // Dextm instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
}


// Dextu: for positions >= 32; encodes lsb = pos - 32.
void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextu.
  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
}
2444 
2445 
// r6-only bit-reversal within each byte, via the BSHFL/DBSHFL groups
// (sa field 0 selects the bitswap sub-operation).
void Assembler::bitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}


void Assembler::dbitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
}


// Cache-prefetch hint; the 5-bit hint value occupies the rt field.
void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}


// r6-only byte-align: the 2-bit (3-bit for dalign) byte position bp is
// packed below the ALIGN/DALIGN selector inside the sa field.
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (ALIGN << kBp2Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}


void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (DALIGN << kBp3Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
}
2480 
2481 
2482 // --------Coprocessor-instructions----------------
2483 
2484 // Load, store, move.
// Load, store, move.
// FPU loads/stores: same int16-offset fast path / 'at' expansion pattern
// as the GPR memory ops.  The doubleword forms additionally assert the
// base is not 'at', since the expansion would clobber it.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(LWC1, at, fd, 0);
  }
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  if (is_int16(src.offset_)) {
    GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(LDC1, at, fd, 0);
  }
}


void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(SWC1, at, fd, 0);
  }
}


void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  if (is_int16(src.offset_)) {
    GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(SDC1, at, fd, 0);
  }
}
2525 
2526 
// GPR <-> FPR move instructions. Each emits a single COP1 register-type
// instruction; f0 fills the unused fd slot of the encoding.

// mtc1: move word from GPR rt to FPU register fs.
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


// mthc1: move word from GPR rt to the high half of FPU register fs.
void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}


// dmtc1: move doubleword from GPR rt to FPU register fs.
void Assembler::dmtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}


// mfc1: move word from FPU register fs to GPR rt.
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


// mfhc1: move word from the high half of FPU register fs to GPR rt.
void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}


// dmfc1: move doubleword from FPU register fs to GPR rt.
void Assembler::dmfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}
2555 
2556 
// ctc1: move word from GPR rt to FPU control register fs (e.g. FCSR).
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


// cfc1: move word from FPU control register fs to GPR rt.
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}
2565 
2566 
DoubleAsTwoUInt32(double d,uint32_t * lo,uint32_t * hi)2567 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2568   uint64_t i;
2569   memcpy(&i, &d, 8);
2570 
2571   *lo = i & 0xffffffff;
2572   *hi = i >> 32;
2573 }
2574 
2575 
// sel.fmt (MIPS64r6 only): FP conditional select driven by bit 0 of fd.
// fmt must be S or D; fd is both the condition input and the destination.
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));

  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}


// Single-precision convenience wrapper for sel.
void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(S, fd, fs, ft);
}


// Double-precision convenience wrapper for sel.
void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(D, fd, fs, ft);
}
2593 
2594 
2595 // FPR.
// seleqz.fmt: FP select-if-equal-to-zero (fd = fs if ft bit 0 == 0, else 0).
// fmt must be S or D.
void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
}


// Double-precision convenience wrapper for seleqz.
void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(D, fd, fs, ft);
}


// Single-precision convenience wrapper for seleqz.
void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(S, fd, fs, ft);
}


// Double-precision convenience wrapper for selnez (defined below).
void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(D, fd, fs, ft);
}


// Single-precision convenience wrapper for selnez (defined below).
void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(S, fd, fs, ft);
}
2621 
2622 
// Pre-r6 (MIPS64r2) FP conditional moves. movz/movn are conditional on a
// GPR; movt/movf are conditional on an FP condition-code bit, which is
// packed together with the tf bit into the ft register slot of the
// encoding (reg_code = cc << 2 | tf, tf = 1 for movt, 0 for movf).

// movz.s: fd = fs if GPR rt == 0.
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}


// movz.d: fd = fs if GPR rt == 0.
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}


// movt.s: fd = fs if FP condition bit cc is true.
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// movt.d: fd = fs if FP condition bit cc is true.
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// movf.s: fd = fs if FP condition bit cc is false.
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// movf.d: fd = fs if FP condition bit cc is false.
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// movn.s: fd = fs if GPR rt != 0.
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}


// movn.d: fd = fs if GPR rt != 0.
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
2677 
2678 
2679 // FPR.
// selnez.fmt (MIPS64r6 only): FP select-if-not-equal-to-zero
// (fd = fs if ft bit 0 != 0, else 0). fmt must be S or D.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
2686 
2687 
2688 // Arithmetic.
2689 
// FP arithmetic. Note the *_D secondary-field constants name the shared
// funct encoding used for both formats; the actual operand width is
// selected by the S/D fmt field.

// add.s: fd = fs + ft (single).
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
}


// add.d: fd = fs + ft (double).
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


// sub.s: fd = fs - ft (single).
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
}


// sub.d: fd = fs - ft (double).
void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


// mul.s: fd = fs * ft (single).
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
}


// mul.d: fd = fs * ft (double).
void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}


// madd.d: fused multiply-add, encoded with the COP1X opcode and an extra
// source register fr.
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
    FPURegister ft) {
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}


// div.s: fd = fs / ft (single).
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
}


// div.d: fd = fs / ft (double).
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
2734 
2735 
// Unary FP operations (fd <- op(fs)); f0 fills the unused ft slot.

// abs.s: fd = |fs| (single).
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
}


// abs.d: fd = |fs| (double).
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}


// mov.d: fd = fs (double register move).
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


// mov.s: fd = fs (single register move).
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}


// neg.s: fd = -fs (single).
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}


// neg.d: fd = -fs (double).
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}


// sqrt.s: fd = sqrt(fs) (single).
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
}


// sqrt.d: fd = sqrt(fs) (double).
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}


// rsqrt.s: fd = 1 / sqrt(fs) (single).
void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}


// rsqrt.d: fd = 1 / sqrt(fs) (double).
void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}


// recip.d: fd = 1 / fs (double).
void Assembler::recip_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}


// recip.s: fd = 1 / fs (single).
void Assembler::recip_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
2794 
2795 
2796 // Conversions.
// FP -> 32-bit word conversions with the four explicit rounding modes
// (cvt uses the current FCSR mode; trunc/round/floor/ceil force one).

// cvt.w.s: convert single fs to word (FCSR rounding mode).
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


// cvt.w.d: convert double fs to word (FCSR rounding mode).
void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


// trunc.w.s: convert single fs to word, rounding toward zero.
void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


// trunc.w.d: convert double fs to word, rounding toward zero.
void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


// round.w.s: convert single fs to word, rounding to nearest.
void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


// round.w.d: convert double fs to word, rounding to nearest.
void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


// floor.w.s: convert single fs to word, rounding toward -infinity.
void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


// floor.w.d: convert double fs to word, rounding toward -infinity.
void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


// ceil.w.s: convert single fs to word, rounding toward +infinity.
void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


// ceil.w.d: convert double fs to word, rounding toward +infinity.
void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}
2845 
2846 
// rint.s wrapper: round single fs to an integral FP value.
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }


// rint.d wrapper: round double fs to an integral FP value.
void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }


// rint.fmt (MIPS64r6 only): round fs to an integral value in FP format,
// using the current FCSR rounding mode.
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
2857 
2858 
// FP -> 64-bit long conversions. The cvt/trunc variants carry an
// architecture-variant DCHECK; the round/floor/ceil variants do not
// (matching the original code).

// cvt.l.s: convert single fs to long (FCSR rounding mode).
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


// cvt.l.d: convert double fs to long (FCSR rounding mode).
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


// trunc.l.s: convert single fs to long, rounding toward zero.
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


// trunc.l.d: convert double fs to long, rounding toward zero.
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


// round.l.s: convert single fs to long, rounding to nearest.
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


// round.l.d: convert double fs to long, rounding to nearest.
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


// floor.l.s: convert single fs to long, rounding toward -infinity.
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


// floor.l.d: convert double fs to long, rounding toward -infinity.
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


// ceil.l.s: convert single fs to long, rounding toward +infinity.
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


// ceil.l.d: convert double fs to long, rounding toward +infinity.
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
2911 
2912 
// class.s (MIPS64r6 only): classify the single-precision value in fs.
void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}


// class.d (MIPS64r6 only): classify the double-precision value in fs.
void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}


// mina.fmt (MIPS64r6 only): fd = the operand with the smaller absolute
// value. fmt must be S or D.
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}


// maxa.fmt (MIPS64r6 only): fd = the operand with the larger absolute
// value. fmt must be S or D.
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
2939 
2940 
// Conversions into FP formats (from word, long, or the other FP format).

// cvt.s.w: convert 32-bit word in fs to single.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


// cvt.s.l: convert 64-bit long in fs to single.
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


// cvt.s.d: convert double fs to single.
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


// cvt.d.w: convert 32-bit word in fs to double.
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


// cvt.d.l: convert 64-bit long in fs to double.
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


// cvt.d.s: convert single fs to double.
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
2971 
2972 
2973 // Conditions for >= MIPSr6.
// cmp.cond.fmt (MIPS64r6 only): FP compare writing an all-ones/all-zeros
// mask into fd. The fmt value occupies the rs field; the (0 << 5) term
// documents the always-zero bit 5 of the function field.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}


// cmp.cond.s. NOTE: the r6 CMP encoding uses the W fmt-field value for
// single precision (not S) — this is intentional, per the r6 ISA.
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

// cmp.cond.d. NOTE: the r6 CMP encoding uses the L fmt-field value for
// double precision (not D) — this is intentional, per the r6 ISA.
void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}
2993 
2994 
// bc1eqz (MIPS64r6 only): branch if bit 0 of FPU register ft is zero.
// offset is a signed 16-bit instruction offset.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}


// bc1nez (MIPS64r6 only): branch if bit 0 of FPU register ft is non-zero.
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
3007 
3008 
3009 // Conditions for < MIPSr6.
// c.cond.fmt (pre-r6 only): FP compare that sets condition-code bit cc.
// cc occupies bits 10..8; the 3 << 4 term is the fixed FC field that marks
// a compare instruction.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


// c.cond.s wrapper.
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}


// c.cond.d wrapper.
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}
3032 
3033 
// Compares src1 against the constant src2 using condition cc 0.
// Only src2 == 0.0 is supported (exact FP equality is intentional here):
// zero is materialized by moving zero_reg into f14 and converting.
// NOTE: clobbers f14 as a scratch register.
void Assembler::fcmp(FPURegister src1, const double src2,
      FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}
3041 
3042 
// bc1f (pre-r6): branch if FP condition-code bit cc is false.
// cc occupies bits 20..18; bit 16 is the true/false selector (0 = false).
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// bc1t (pre-r6): branch if FP condition-code bit cc is true (bit 16 = 1).
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
3055 
3056 
// Adjusts an internal reference at pc after the code buffer has moved by
// pc_delta bytes. Three encodings are handled:
//  - a raw 64-bit pointer embedded in the stream (IsInternalReference),
//  - a lui/ori/dsll/ori address-materialization sequence,
//  - j/jal, or their "raw mark" boxed forms emitted before binding.
// Returns the number of instruction slots patched (0 if the reference is
// the kEndOfJumpChain sentinel and nothing needs patching).
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  if (RelocInfo::IsInternalReference(rmode)) {
    // Raw 64-bit pointer: just slide it by the delta.
    int64_t* p = reinterpret_cast<int64_t*>(pc);
    if (*p == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  if (IsLui(instr)) {
    // Address materialized by lui/ori/dsll/ori: the 16-bit immediate
    // pieces live in slots 0, 1 and 3 (slot 2 is the dsll shift).
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK((imm & 3) == 0);  // Instruction addresses are 4-byte aligned.

    // Clear the old immediates and splice in the relocated address.
    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm >> 16 & kImm16Mask));
    instr_at_put(pc + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else if (IsJ(instr) || IsJal(instr)) {
    // Regular j/jal relocation.
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    imm28 += pc_delta;
    imm28 &= kImm28Mask;  // j/jal targets stay within the 256MB region.
    instr &= ~kImm26Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
           ((instr & kJumpRawMask) == kJalRawMark));
    // Unbox raw offset and emit j/jal.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    // Sign extend 28-bit offset to 32-bit.
    imm28 = (imm28 << 4) >> 4;
    // Convert the pc-relative raw offset to an absolute region target.
    uint64_t target =
        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
    target &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
    // Check markings whether to emit j or jal.
    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}
3127 
3128 
// Grows the code buffer when it is (nearly) full: allocates a larger
// buffer, copies instructions (which grow upward from the start) and
// relocation info (which grows downward from the end), then fixes up all
// internal references by the distance the buffer moved.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: double small buffers, grow large ones by 1MB.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());

  // Copy the data. pc_delta shifts from the old start; rc_delta shifts
  // from the old end (reloc info is anchored to the buffer end).
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
      (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries: raw internal references must track the move.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}
3175 
3176 
// Raw data emission. Each checks that we are not emitting into a branch
// forbidden slot before writing the bytes.

// Emit a raw byte.
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


// Emit a raw 32-bit word.
void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


// Emit a raw 64-bit word.
void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
3193 
3194 
// Emits the 64-bit address of a label as data, recording an internal
// reference so it gets relocated with the buffer. Bound labels yield the
// absolute address directly; unbound labels emit a jump-chain entry and
// record the position for later patching.
void Assembler::dd(Label* label) {
  uint64_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
3207 
3208 
// Records relocation information for the instruction at the current pc.
// External references are skipped unless serializing or emitting debug
// code; CODE_TARGET_WITH_ID additionally attaches the recorded AST id.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::COMMENT &&
      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      // Replace the generic data with the pending AST id, then clear it so
      // it is not attached to a later target as well.
      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
3237 
3238 
// Prevents trampoline-pool emission for the next 'instructions'
// instructions. The pool check runs first so a pending pool can still be
// flushed before the blocked region begins.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
3243 
3244 
// Emits the trampoline pool (long-range branch slots for unbound labels)
// if emission is not currently blocked. The pool is emitted at most once.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // Jump over the pool so fall-through execution skips it; r6 has a
      // compact branch, older variants need a delay-slot nop.
      if (kArchVariant == kMips64r6) {
        bc(&after_pool);
      } else {
        b(&after_pool);
        nop();
      }

      EmitForbiddenSlotInstruction();
      int pool_start = pc_offset();
      // One j + nop slot per unbound label.
      for (int i = 0; i < unbound_labels_count_; i++) {
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
          j(&after_pool);
        }
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
3304 
3305 
// Reads the 48-bit address materialized by the 4-instruction li sequence
// at pc (lui / ori / dsll / ori — slot 2, the dsll, carries no immediate)
// and sign-extends it to a canonical 64-bit address.
Address Assembler::target_address_at(Address pc) {
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 3 * kInstrSize);

  // Interpret 4 instructions for address generated by li: See listing in
  // Assembler::set_target_address_at() just below.
  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
      (GetOpcodeField(instr3) == ORI)) {
    // Assemble the 48 bit value.
     int64_t addr  = static_cast<int64_t>(
          ((uint64_t)(GetImmediate16(instr0)) << 32) |
          ((uint64_t)(GetImmediate16(instr1)) << 16) |
          ((uint64_t)(GetImmediate16(instr3))));

    // Sign extend to get canonical address.
    addr = (addr << 16) >> 16;
    return reinterpret_cast<Address>(addr);
  }
  // We should never get here, force a bad address if we do.
  UNREACHABLE();
  return (Address)0x0;
}
3329 
3330 
3331 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
3332 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
3333 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
3334 // OS::nan_value() returns a qNaN.
// Overwrites the heap number's value with a quiet NaN (see the note above
// about MIPS vs ia32 NaN encodings).
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
3338 
3339 
3340 // On Mips64, a target address is stored in a 4-instruction sequence:
3341 //    0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
3342 //    1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
3343 //    2: dsll(rd, rd, 16);
3344 //    3: ori(rd, rd, j.imm32_ & kImm16Mask);
3345 //
3346 // Patching the address must replace all the lui & ori instructions,
3347 // and flush the i-cache.
3348 //
3349 // There is an optimization below, which emits a nop when the address
3350 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
3351 // and possibly removed.
// Patches the 4-instruction li sequence at pc to load 'target' (48-bit
// effective address; slot 2, the dsll, is left untouched), then flushes
// the i-cache unless explicitly skipped.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
// in code on MIP64 because only 48-bits of address is effectively used.
// It relies on fact the upper [63:48] bits are not used for virtual address
// translation and they have to be set according to value of bit 47 in order
// get canonical address.
  // Recover the destination register from the existing ori in slot 1.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint64_t itarget = reinterpret_cast<uint64_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
         GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to insure patchable code.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  // dsll rt, rt, 16.
  // ori rt rt, lower-16.
  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
      | ((itarget >> 16) & kImm16Mask);
  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
      | (itarget & kImm16Mask);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
  }
}
3388 
3389 
3390 }  // namespace internal
3391 }  // namespace v8
3392 
3393 #endif  // V8_TARGET_ARCH_MIPS64
3394