// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/mips/assembler-mips.h"

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/mips/assembler-mips-inl.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}


ProbeImpl(bool cross_compile)67 void CpuFeatures::ProbeImpl(bool cross_compile) {
68 supported_ |= CpuFeaturesImpliedByCompiler();
69
70 // Only use statically determined features for cross compile (snapshot).
71 if (cross_compile) return;
72
73 // If the compiler is allowed to use fpu then we can use fpu too in our
74 // code generation.
75 #ifndef __mips__
76 // For the simulator build, use FPU.
77 supported_ |= 1u << FPU;
78 #if defined(_MIPS_ARCH_MIPS32R6)
79 // FP64 mode is implied on r6.
80 supported_ |= 1u << FP64FPU;
81 #endif
82 #if defined(FPU_MODE_FP64)
83 supported_ |= 1u << FP64FPU;
84 #endif
85 #else
86 // Probe for additional features at runtime.
87 base::CPU cpu;
88 if (cpu.has_fpu()) supported_ |= 1u << FPU;
89 #if defined(FPU_MODE_FPXX)
90 if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
91 #elif defined(FPU_MODE_FP64)
92 supported_ |= 1u << FP64FPU;
93 #endif
94 #if defined(_MIPS_ARCH_MIPS32RX)
95 if (cpu.architecture() == 6) {
96 supported_ |= 1u << MIPSr6;
97 } else if (cpu.architecture() == 2) {
98 supported_ |= 1u << MIPSr1;
99 supported_ |= 1u << MIPSr2;
100 } else {
101 supported_ |= 1u << MIPSr1;
102 }
103 #endif
104 #endif
105 }
106
107
PrintTarget()108 void CpuFeatures::PrintTarget() { }
PrintFeatures()109 void CpuFeatures::PrintFeatures() { }
110
111
ToNumber(Register reg)112 int ToNumber(Register reg) {
113 DCHECK(reg.is_valid());
114 const int kNumbers[] = {
115 0, // zero_reg
116 1, // at
117 2, // v0
118 3, // v1
119 4, // a0
120 5, // a1
121 6, // a2
122 7, // a3
123 8, // t0
124 9, // t1
125 10, // t2
126 11, // t3
127 12, // t4
128 13, // t5
129 14, // t6
130 15, // t7
131 16, // s0
132 17, // s1
133 18, // s2
134 19, // s3
135 20, // s4
136 21, // s5
137 22, // s6
138 23, // s7
139 24, // t8
140 25, // t9
141 26, // k0
142 27, // k1
143 28, // gp
144 29, // sp
145 30, // fp
146 31, // ra
147 };
148 return kNumbers[reg.code()];
149 }
150
151
ToRegister(int num)152 Register ToRegister(int num) {
153 DCHECK(num >= 0 && num < kNumRegisters);
154 const Register kRegisters[] = {
155 zero_reg,
156 at,
157 v0, v1,
158 a0, a1, a2, a3,
159 t0, t1, t2, t3, t4, t5, t6, t7,
160 s0, s1, s2, s3, s4, s5, s6, s7,
161 t8, t9,
162 k0, k1,
163 gp,
164 sp,
165 fp,
166 ra
167 };
168 return kRegisters[num];
169 }
170
171
172 // -----------------------------------------------------------------------------
173 // Implementation of RelocInfo.
174
175 const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
176 1 << RelocInfo::INTERNAL_REFERENCE |
177 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
178
179
IsCodedSpecially()180 bool RelocInfo::IsCodedSpecially() {
181 // The deserializer needs to know whether a pointer is specially coded. Being
182 // specially coded on MIPS means that it is a lui/ori instruction, and that is
183 // always the case inside code objects.
184 return true;
185 }
186
187
IsInConstantPool()188 bool RelocInfo::IsInConstantPool() {
189 return false;
190 }
191
wasm_memory_reference()192 Address RelocInfo::wasm_memory_reference() {
193 DCHECK(IsWasmMemoryReference(rmode_));
194 return Assembler::target_address_at(pc_, host_);
195 }
196
wasm_global_reference()197 Address RelocInfo::wasm_global_reference() {
198 DCHECK(IsWasmGlobalReference(rmode_));
199 return Assembler::target_address_at(pc_, host_);
200 }
201
wasm_memory_size_reference()202 uint32_t RelocInfo::wasm_memory_size_reference() {
203 DCHECK(IsWasmMemorySizeReference(rmode_));
204 return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
205 }
206
unchecked_update_wasm_memory_reference(Address address,ICacheFlushMode flush_mode)207 void RelocInfo::unchecked_update_wasm_memory_reference(
208 Address address, ICacheFlushMode flush_mode) {
209 Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
210 }
211
unchecked_update_wasm_memory_size(uint32_t size,ICacheFlushMode flush_mode)212 void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
213 ICacheFlushMode flush_mode) {
214 Assembler::set_target_address_at(isolate_, pc_, host_,
215 reinterpret_cast<Address>(size), flush_mode);
216 }
217
218 // -----------------------------------------------------------------------------
219 // Implementation of Operand and MemOperand.
220 // See assembler-mips-inl.h for inlined constructors.
221
Operand(Handle<Object> handle)222 Operand::Operand(Handle<Object> handle) {
223 AllowDeferredHandleDereference using_raw_address;
224 rm_ = no_reg;
225 // Verify all Objects referred by code are NOT in new space.
226 Object* obj = *handle;
227 if (obj->IsHeapObject()) {
228 imm32_ = reinterpret_cast<intptr_t>(handle.location());
229 rmode_ = RelocInfo::EMBEDDED_OBJECT;
230 } else {
231 // No relocation needed.
232 imm32_ = reinterpret_cast<intptr_t>(obj);
233 rmode_ = RelocInfo::NONE32;
234 }
235 }
236
237
MemOperand(Register rm,int32_t offset)238 MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
239 offset_ = offset;
240 }
241
242
MemOperand(Register rm,int32_t unit,int32_t multiplier,OffsetAddend offset_addend)243 MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
244 OffsetAddend offset_addend) : Operand(rm) {
245 offset_ = unit * multiplier + offset_addend;
246 }
247
248
249 // -----------------------------------------------------------------------------
250 // Specific instructions, constants, and masks.
251
252 static const int kNegOffset = 0x00008000;
253 // addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
254 // operations as post-increment of sp.
255 const Instr kPopInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
256 (Register::kCode_sp << kRtShift) |
257 (kPointerSize & kImm16Mask); // NOLINT
258 // addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
259 const Instr kPushInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
260 (Register::kCode_sp << kRtShift) |
261 (-kPointerSize & kImm16Mask); // NOLINT
262 // sw(r, MemOperand(sp, 0))
263 const Instr kPushRegPattern =
264 SW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
265 // lw(r, MemOperand(sp, 0))
266 const Instr kPopRegPattern =
267 LW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
268
269 const Instr kLwRegFpOffsetPattern =
270 LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
271
272 const Instr kSwRegFpOffsetPattern =
273 SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
274
275 const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
276 (kNegOffset & kImm16Mask); // NOLINT
277
278 const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
279 (kNegOffset & kImm16Mask); // NOLINT
280 // A mask for the Rt register for push, pop, lw, sw instructions.
281 const Instr kRtMask = kRtFieldMask;
282 const Instr kLwSwInstrTypeMask = 0xffe00000;
283 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
284 const Instr kLwSwOffsetMask = kImm16Mask;
285
Assembler(Isolate * isolate,void * buffer,int buffer_size)286 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
287 : AssemblerBase(isolate, buffer, buffer_size),
288 recorded_ast_id_(TypeFeedbackId::None()) {
289 reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
290
291 last_trampoline_pool_end_ = 0;
292 no_trampoline_pool_before_ = 0;
293 trampoline_pool_blocked_nesting_ = 0;
294 // We leave space (16 * kTrampolineSlotsSize)
295 // for BlockTrampolinePoolScope buffer.
296 next_buffer_check_ = FLAG_force_long_branches
297 ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
298 internal_trampoline_exception_ = false;
299 last_bound_pos_ = 0;
300
301 trampoline_emitted_ = FLAG_force_long_branches;
302 unbound_labels_count_ = 0;
303 block_buffer_growth_ = false;
304
305 ClearRecordedAstId();
306 }
307
308
GetCode(CodeDesc * desc)309 void Assembler::GetCode(CodeDesc* desc) {
310 EmitForbiddenSlotInstruction();
311 DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
312 // Set up code descriptor.
313 desc->buffer = buffer_;
314 desc->buffer_size = buffer_size_;
315 desc->instr_size = pc_offset();
316 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
317 desc->origin = this;
318 desc->constant_pool_size = 0;
319 desc->unwinding_info_size = 0;
320 desc->unwinding_info = nullptr;
321 }
322
323
Align(int m)324 void Assembler::Align(int m) {
325 DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
326 EmitForbiddenSlotInstruction();
327 while ((pc_offset() & (m - 1)) != 0) {
328 nop();
329 }
330 }
331
332
CodeTargetAlign()333 void Assembler::CodeTargetAlign() {
334 // No advantage to aligning branch/call targets to more than
335 // single instruction, that I am aware of.
336 Align(4);
337 }
338
339
GetRtReg(Instr instr)340 Register Assembler::GetRtReg(Instr instr) {
341 Register rt;
342 rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
343 return rt;
344 }
345
346
GetRsReg(Instr instr)347 Register Assembler::GetRsReg(Instr instr) {
348 Register rs;
349 rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
350 return rs;
351 }
352
353
GetRdReg(Instr instr)354 Register Assembler::GetRdReg(Instr instr) {
355 Register rd;
356 rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
357 return rd;
358 }
359
360
GetRt(Instr instr)361 uint32_t Assembler::GetRt(Instr instr) {
362 return (instr & kRtFieldMask) >> kRtShift;
363 }
364
365
GetRtField(Instr instr)366 uint32_t Assembler::GetRtField(Instr instr) {
367 return instr & kRtFieldMask;
368 }
369
370
GetRs(Instr instr)371 uint32_t Assembler::GetRs(Instr instr) {
372 return (instr & kRsFieldMask) >> kRsShift;
373 }
374
375
GetRsField(Instr instr)376 uint32_t Assembler::GetRsField(Instr instr) {
377 return instr & kRsFieldMask;
378 }
379
380
GetRd(Instr instr)381 uint32_t Assembler::GetRd(Instr instr) {
382 return (instr & kRdFieldMask) >> kRdShift;
383 }
384
385
GetRdField(Instr instr)386 uint32_t Assembler::GetRdField(Instr instr) {
387 return instr & kRdFieldMask;
388 }
389
390
GetSa(Instr instr)391 uint32_t Assembler::GetSa(Instr instr) {
392 return (instr & kSaFieldMask) >> kSaShift;
393 }
394
395
GetSaField(Instr instr)396 uint32_t Assembler::GetSaField(Instr instr) {
397 return instr & kSaFieldMask;
398 }
399
400
GetOpcodeField(Instr instr)401 uint32_t Assembler::GetOpcodeField(Instr instr) {
402 return instr & kOpcodeMask;
403 }
404
405
GetFunction(Instr instr)406 uint32_t Assembler::GetFunction(Instr instr) {
407 return (instr & kFunctionFieldMask) >> kFunctionShift;
408 }
409
410
GetFunctionField(Instr instr)411 uint32_t Assembler::GetFunctionField(Instr instr) {
412 return instr & kFunctionFieldMask;
413 }
414
415
GetImmediate16(Instr instr)416 uint32_t Assembler::GetImmediate16(Instr instr) {
417 return instr & kImm16Mask;
418 }
419
420
GetLabelConst(Instr instr)421 uint32_t Assembler::GetLabelConst(Instr instr) {
422 return instr & ~kImm16Mask;
423 }
424
425
IsPop(Instr instr)426 bool Assembler::IsPop(Instr instr) {
427 return (instr & ~kRtMask) == kPopRegPattern;
428 }
429
430
IsPush(Instr instr)431 bool Assembler::IsPush(Instr instr) {
432 return (instr & ~kRtMask) == kPushRegPattern;
433 }
434
435
IsSwRegFpOffset(Instr instr)436 bool Assembler::IsSwRegFpOffset(Instr instr) {
437 return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
438 }
439
440
IsLwRegFpOffset(Instr instr)441 bool Assembler::IsLwRegFpOffset(Instr instr) {
442 return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
443 }
444
445
IsSwRegFpNegOffset(Instr instr)446 bool Assembler::IsSwRegFpNegOffset(Instr instr) {
447 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
448 kSwRegFpNegOffsetPattern);
449 }
450
451
IsLwRegFpNegOffset(Instr instr)452 bool Assembler::IsLwRegFpNegOffset(Instr instr) {
453 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
454 kLwRegFpNegOffsetPattern);
455 }
456
457
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (branch -1 is inf loop).
// The instruction 16-bit offset field addresses 32-bit words, but in
// code is conv to an 18-bit value addressing bytes, hence the -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;


IsBranch(Instr instr)478 bool Assembler::IsBranch(Instr instr) {
479 uint32_t opcode = GetOpcodeField(instr);
480 uint32_t rt_field = GetRtField(instr);
481 uint32_t rs_field = GetRsField(instr);
482 // Checks if the instruction is a branch.
483 bool isBranch =
484 opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
485 opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
486 (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
487 rt_field == BLTZAL || rt_field == BGEZAL)) ||
488 (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
489 (opcode == COP1 && rs_field == BC1EQZ) ||
490 (opcode == COP1 && rs_field == BC1NEZ);
491 if (!isBranch && IsMipsArchVariant(kMips32r6)) {
492 // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
493 // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
494 isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
495 opcode == BALC ||
496 (opcode == POP66 && rs_field != 0) || // BEQZC
497 (opcode == POP76 && rs_field != 0); // BNEZC
498 }
499 return isBranch;
500 }
501
502
IsBc(Instr instr)503 bool Assembler::IsBc(Instr instr) {
504 uint32_t opcode = GetOpcodeField(instr);
505 // Checks if the instruction is a BC or BALC.
506 return opcode == BC || opcode == BALC;
507 }
508
509
IsBzc(Instr instr)510 bool Assembler::IsBzc(Instr instr) {
511 uint32_t opcode = GetOpcodeField(instr);
512 // Checks if the instruction is BEQZC or BNEZC.
513 return (opcode == POP66 && GetRsField(instr) != 0) ||
514 (opcode == POP76 && GetRsField(instr) != 0);
515 }
516
517
IsEmittedConstant(Instr instr)518 bool Assembler::IsEmittedConstant(Instr instr) {
519 uint32_t label_constant = GetLabelConst(instr);
520 return label_constant == 0; // Emitted label const in reg-exp engine.
521 }
522
523
IsBeq(Instr instr)524 bool Assembler::IsBeq(Instr instr) {
525 return GetOpcodeField(instr) == BEQ;
526 }
527
528
IsBne(Instr instr)529 bool Assembler::IsBne(Instr instr) {
530 return GetOpcodeField(instr) == BNE;
531 }
532
533
IsBeqzc(Instr instr)534 bool Assembler::IsBeqzc(Instr instr) {
535 uint32_t opcode = GetOpcodeField(instr);
536 return opcode == POP66 && GetRsField(instr) != 0;
537 }
538
539
IsBnezc(Instr instr)540 bool Assembler::IsBnezc(Instr instr) {
541 uint32_t opcode = GetOpcodeField(instr);
542 return opcode == POP76 && GetRsField(instr) != 0;
543 }
544
545
IsBeqc(Instr instr)546 bool Assembler::IsBeqc(Instr instr) {
547 uint32_t opcode = GetOpcodeField(instr);
548 uint32_t rs = GetRsField(instr);
549 uint32_t rt = GetRtField(instr);
550 return opcode == POP10 && rs != 0 && rs < rt; // && rt != 0
551 }
552
553
IsBnec(Instr instr)554 bool Assembler::IsBnec(Instr instr) {
555 uint32_t opcode = GetOpcodeField(instr);
556 uint32_t rs = GetRsField(instr);
557 uint32_t rt = GetRtField(instr);
558 return opcode == POP30 && rs != 0 && rs < rt; // && rt != 0
559 }
560
IsJicOrJialc(Instr instr)561 bool Assembler::IsJicOrJialc(Instr instr) {
562 uint32_t opcode = GetOpcodeField(instr);
563 uint32_t rs = GetRsField(instr);
564 return (opcode == POP66 || opcode == POP76) && rs == 0;
565 }
566
IsJump(Instr instr)567 bool Assembler::IsJump(Instr instr) {
568 uint32_t opcode = GetOpcodeField(instr);
569 uint32_t rt_field = GetRtField(instr);
570 uint32_t rd_field = GetRdField(instr);
571 uint32_t function_field = GetFunctionField(instr);
572 // Checks if the instruction is a jump.
573 return opcode == J || opcode == JAL ||
574 (opcode == SPECIAL && rt_field == 0 &&
575 ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
576 }
577
IsJ(Instr instr)578 bool Assembler::IsJ(Instr instr) {
579 uint32_t opcode = GetOpcodeField(instr);
580 // Checks if the instruction is a jump.
581 return opcode == J;
582 }
583
584
IsJal(Instr instr)585 bool Assembler::IsJal(Instr instr) {
586 return GetOpcodeField(instr) == JAL;
587 }
588
589
IsJr(Instr instr)590 bool Assembler::IsJr(Instr instr) {
591 if (!IsMipsArchVariant(kMips32r6)) {
592 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
593 } else {
594 return GetOpcodeField(instr) == SPECIAL &&
595 GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
596 }
597 }
598
599
IsJalr(Instr instr)600 bool Assembler::IsJalr(Instr instr) {
601 return GetOpcodeField(instr) == SPECIAL &&
602 GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
603 }
604
605
IsLui(Instr instr)606 bool Assembler::IsLui(Instr instr) {
607 uint32_t opcode = GetOpcodeField(instr);
608 // Checks if the instruction is a load upper immediate.
609 return opcode == LUI;
610 }
611
612
IsOri(Instr instr)613 bool Assembler::IsOri(Instr instr) {
614 uint32_t opcode = GetOpcodeField(instr);
615 // Checks if the instruction is a load upper immediate.
616 return opcode == ORI;
617 }
618
619
IsNop(Instr instr,unsigned int type)620 bool Assembler::IsNop(Instr instr, unsigned int type) {
621 // See Assembler::nop(type).
622 DCHECK(type < 32);
623 uint32_t opcode = GetOpcodeField(instr);
624 uint32_t function = GetFunctionField(instr);
625 uint32_t rt = GetRt(instr);
626 uint32_t rd = GetRd(instr);
627 uint32_t sa = GetSa(instr);
628
629 // Traditional mips nop == sll(zero_reg, zero_reg, 0)
630 // When marking non-zero type, use sll(zero_reg, at, type)
631 // to avoid use of mips ssnop and ehb special encodings
632 // of the sll instruction.
633
634 Register nop_rt_reg = (type == 0) ? zero_reg : at;
635 bool ret = (opcode == SPECIAL && function == SLL &&
636 rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
637 rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
638 sa == type);
639
640 return ret;
641 }
642
643
GetBranchOffset(Instr instr)644 int32_t Assembler::GetBranchOffset(Instr instr) {
645 DCHECK(IsBranch(instr));
646 return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
647 }
648
649
IsLw(Instr instr)650 bool Assembler::IsLw(Instr instr) {
651 return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
652 }
653
654
GetLwOffset(Instr instr)655 int16_t Assembler::GetLwOffset(Instr instr) {
656 DCHECK(IsLw(instr));
657 return ((instr & kImm16Mask));
658 }
659
660
SetLwOffset(Instr instr,int16_t offset)661 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
662 DCHECK(IsLw(instr));
663
664 // We actually create a new lw instruction based on the original one.
665 Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
666 | (offset & kImm16Mask);
667
668 return temp_instr;
669 }
670
671
IsSw(Instr instr)672 bool Assembler::IsSw(Instr instr) {
673 return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
674 }
675
676
SetSwOffset(Instr instr,int16_t offset)677 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
678 DCHECK(IsSw(instr));
679 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
680 }
681
682
IsAddImmediate(Instr instr)683 bool Assembler::IsAddImmediate(Instr instr) {
684 return ((instr & kOpcodeMask) == ADDIU);
685 }
686
687
SetAddImmediateOffset(Instr instr,int16_t offset)688 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
689 DCHECK(IsAddImmediate(instr));
690 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
691 }
692
693
IsAndImmediate(Instr instr)694 bool Assembler::IsAndImmediate(Instr instr) {
695 return GetOpcodeField(instr) == ANDI;
696 }
697
698
OffsetSizeInBits(Instr instr)699 static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
700 if (IsMipsArchVariant(kMips32r6)) {
701 if (Assembler::IsBc(instr)) {
702 return Assembler::OffsetSize::kOffset26;
703 } else if (Assembler::IsBzc(instr)) {
704 return Assembler::OffsetSize::kOffset21;
705 }
706 }
707 return Assembler::OffsetSize::kOffset16;
708 }
709
710
AddBranchOffset(int pos,Instr instr)711 static inline int32_t AddBranchOffset(int pos, Instr instr) {
712 int bits = OffsetSizeInBits(instr);
713 const int32_t mask = (1 << bits) - 1;
714 bits = 32 - bits;
715
716 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
717 // the compiler uses arithmetic shifts for signed integers.
718 int32_t imm = ((instr & mask) << bits) >> (bits - 2);
719
720 if (imm == kEndOfChain) {
721 // EndOfChain sentinel is returned directly, not relative to pc or pos.
722 return kEndOfChain;
723 } else {
724 return pos + Assembler::kBranchPCOffset + imm;
725 }
726 }
727
CreateTargetAddress(Instr instr_lui,Instr instr_jic)728 uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
729 DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic));
730 int16_t jic_offset = GetImmediate16(instr_jic);
731 int16_t lui_offset = GetImmediate16(instr_lui);
732
733 if (jic_offset < 0) {
734 lui_offset += kImm16Mask;
735 }
736 uint32_t lui_offset_u = (static_cast<uint32_t>(lui_offset)) << kLuiShift;
737 uint32_t jic_offset_u = static_cast<uint32_t>(jic_offset) & kImm16Mask;
738
739 return lui_offset_u | jic_offset_u;
740 }
741
742 // Use just lui and jic instructions. Insert lower part of the target address in
743 // jic offset part. Since jic sign-extends offset and then add it with register,
744 // before that addition, difference between upper part of the target address and
745 // upper part of the sign-extended offset (0xffff or 0x0000), will be inserted
746 // in jic register with lui instruction.
UnpackTargetAddress(uint32_t address,int16_t & lui_offset,int16_t & jic_offset)747 void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
748 int16_t& jic_offset) {
749 lui_offset = (address & kHiMask) >> kLuiShift;
750 jic_offset = address & kLoMask;
751
752 if (jic_offset < 0) {
753 lui_offset -= kImm16Mask;
754 }
755 }
756
UnpackTargetAddressUnsigned(uint32_t address,uint32_t & lui_offset,uint32_t & jic_offset)757 void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
758 uint32_t& lui_offset,
759 uint32_t& jic_offset) {
760 int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
761 int16_t jic_offset16 = address & kLoMask;
762
763 if (jic_offset16 < 0) {
764 lui_offset16 -= kImm16Mask;
765 }
766 lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
767 jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
768 }
769
target_at(int pos,bool is_internal)770 int Assembler::target_at(int pos, bool is_internal) {
771 Instr instr = instr_at(pos);
772 if (is_internal) {
773 if (instr == 0) {
774 return kEndOfChain;
775 } else {
776 int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
777 int delta = static_cast<int>(instr_address - instr);
778 DCHECK(pos > delta);
779 return pos - delta;
780 }
781 }
782 if ((instr & ~kImm16Mask) == 0) {
783 // Emitted label constant, not part of a branch.
784 if (instr == 0) {
785 return kEndOfChain;
786 } else {
787 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
788 return (imm18 + pos);
789 }
790 }
791 // Check we have a branch or jump instruction.
792 DCHECK(IsBranch(instr) || IsLui(instr));
793 if (IsBranch(instr)) {
794 return AddBranchOffset(pos, instr);
795 } else {
796 Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
797 Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
798 DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
799 int32_t imm;
800 if (IsJicOrJialc(instr2)) {
801 imm = CreateTargetAddress(instr1, instr2);
802 } else {
803 imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
804 imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
805 }
806
807 if (imm == kEndOfJumpChain) {
808 // EndOfChain sentinel is returned directly, not relative to pc or pos.
809 return kEndOfChain;
810 } else {
811 uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
812 int32_t delta = instr_address - imm;
813 DCHECK(pos > delta);
814 return pos - delta;
815 }
816 }
817 return 0;
818 }
819
820
SetBranchOffset(int32_t pos,int32_t target_pos,Instr instr)821 static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
822 Instr instr) {
823 int32_t bits = OffsetSizeInBits(instr);
824 int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
825 DCHECK((imm & 3) == 0);
826 imm >>= 2;
827
828 const int32_t mask = (1 << bits) - 1;
829 instr &= ~mask;
830 DCHECK(is_intn(imm, bits));
831
832 return instr | (imm & mask);
833 }
834
835
target_at_put(int32_t pos,int32_t target_pos,bool is_internal)836 void Assembler::target_at_put(int32_t pos, int32_t target_pos,
837 bool is_internal) {
838 Instr instr = instr_at(pos);
839
840 if (is_internal) {
841 uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
842 instr_at_put(pos, imm);
843 return;
844 }
845 if ((instr & ~kImm16Mask) == 0) {
846 DCHECK(target_pos == kEndOfChain || target_pos >= 0);
847 // Emitted label constant, not part of a branch.
848 // Make label relative to Code* of generated Code object.
849 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
850 return;
851 }
852
853 DCHECK(IsBranch(instr) || IsLui(instr));
854 if (IsBranch(instr)) {
855 instr = SetBranchOffset(pos, target_pos, instr);
856 instr_at_put(pos, instr);
857 } else {
858 Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
859 Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
860 DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
861 uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
862 DCHECK((imm & 3) == 0);
863 DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
864 instr1 &= ~kImm16Mask;
865 instr2 &= ~kImm16Mask;
866
867 if (IsJicOrJialc(instr2)) {
868 uint32_t lui_offset_u, jic_offset_u;
869 UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
870 instr_at_put(pos + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
871 instr_at_put(pos + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
872 } else {
873 instr_at_put(pos + 0 * Assembler::kInstrSize,
874 instr1 | ((imm & kHiMask) >> kLuiShift));
875 instr_at_put(pos + 1 * Assembler::kInstrSize,
876 instr2 | (imm & kImm16Mask));
877 }
878 }
879 }
880
881
print(Label * L)882 void Assembler::print(Label* L) {
883 if (L->is_unused()) {
884 PrintF("unused label\n");
885 } else if (L->is_bound()) {
886 PrintF("bound label to %d\n", L->pos());
887 } else if (L->is_linked()) {
888 Label l = *L;
889 PrintF("unbound label");
890 while (l.is_linked()) {
891 PrintF("@ %d ", l.pos());
892 Instr instr = instr_at(l.pos());
893 if ((instr & ~kImm16Mask) == 0) {
894 PrintF("value\n");
895 } else {
896 PrintF("%d\n", instr);
897 }
898 next(&l, internal_reference_positions_.find(l.pos()) !=
899 internal_reference_positions_.end());
900 }
901 } else {
902 PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
903 }
904 }
905
906
bind_to(Label * L,int pos)907 void Assembler::bind_to(Label* L, int pos) {
908 DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
909 int32_t trampoline_pos = kInvalidSlotPos;
910 bool is_internal = false;
911 if (L->is_linked() && !trampoline_emitted_) {
912 unbound_labels_count_--;
913 next_buffer_check_ += kTrampolineSlotsSize;
914 }
915
916 while (L->is_linked()) {
917 int32_t fixup_pos = L->pos();
918 int32_t dist = pos - fixup_pos;
919 is_internal = internal_reference_positions_.find(fixup_pos) !=
920 internal_reference_positions_.end();
921 next(L, is_internal); // Call next before overwriting link with target at
922 // fixup_pos.
923 Instr instr = instr_at(fixup_pos);
924 if (is_internal) {
925 target_at_put(fixup_pos, pos, is_internal);
926 } else {
927 if (IsBranch(instr)) {
928 int branch_offset = BranchOffset(instr);
929 if (dist > branch_offset) {
930 if (trampoline_pos == kInvalidSlotPos) {
931 trampoline_pos = get_trampoline_entry(fixup_pos);
932 CHECK(trampoline_pos != kInvalidSlotPos);
933 }
934 CHECK((trampoline_pos - fixup_pos) <= branch_offset);
935 target_at_put(fixup_pos, trampoline_pos, false);
936 fixup_pos = trampoline_pos;
937 dist = pos - fixup_pos;
938 }
939 target_at_put(fixup_pos, pos, false);
940 } else {
941 target_at_put(fixup_pos, pos, false);
942 }
943 }
944 }
945 L->bind_to(pos);
946
947 // Keep track of the last bound label so we don't eliminate any instructions
948 // before a bound label.
949 if (pos > last_bound_pos_)
950 last_bound_pos_ = pos;
951 }
952
953
bind(Label * L)954 void Assembler::bind(Label* L) {
955 DCHECK(!L->is_bound()); // Label can only be bound once.
956 bind_to(L, pc_offset());
957 }
958
959
// Advances L to the next entry in its link chain, or unuses it when the
// chain ends.
void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
970
971
// Returns true if the bound label L is close enough for a standard branch,
// leaving a small safety margin of instructions.
bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}
976
977
// Returns true if L is reachable with an offset field of the given bit width
// (bits counts offset bits before the 2-bit instruction alignment shift).
// Unbound or null labels are optimistically treated as near.
bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
}
982
983
// Architecture-variant-aware nearness check for branches.
bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
}
988
989
// Returns the maximum positive branch reach (in bytes) for the given
// branch instruction, based on the width of its offset field.
int Assembler::BranchOffset(Instr instr) {
  // At pre-R6 and for other R6 branches the offset is 16 bits.
  int bits = OffsetSize::kOffset16;

  if (IsMipsArchVariant(kMips32r6)) {
    uint32_t opcode = GetOpcodeField(instr);
    switch (opcode) {
      // Checks BC or BALC.
      case BC:
      case BALC:
        bits = OffsetSize::kOffset26;
        break;

      // Checks BEQZC or BNEZC.
      case POP66:
      case POP76:
        // rs == 0 encodes JIC/JIALC, which are not PC-relative branches.
        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
        break;
      default:
        break;
    }
  }

  // Offsets are in instruction words, hence the +2 alignment shift.
  return (1 << (bits + 2 - 1)) - 1;
}
1015
1016
1017 // We have to use a temporary register for things that can be relocated even
1018 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
1019 // space. There is no guarantee that the relocated location can be similarly
1020 // encoded.
// Returns true if a value with relocation mode rmode must be loaded through
// a register rather than encoded as an immediate (see comment above).
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}
1024
// Emits an R-type instruction: opcode | rs | rt | rd | sa | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
1036
1037
// Emits an R-type instruction with bit-field operands (e.g. INS/EXT):
// msb goes in the rd field, lsb in the sa field.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}
1049
1050
// Emits an FPU instruction: opcode | fmt | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}
1062
1063
// Emits a four-FPU-register instruction (e.g. madd-style):
// opcode | fr | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1075
1076
// Emits a GPR<->FPU transfer instruction: opcode | fmt | rt | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1088
1089
// Emits a GPR<->FPU-control-register transfer (e.g. CFC1/CTC1).
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}
1100
1101
1102 // Instructions with immediate value.
1103 // Registers are in the order of the instruction encoding, from left to right.
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
// Emits an I-type instruction with a 16-bit immediate: opcode | rs | rt | imm.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1112
1113
// Emits an I-type instruction where the rt field encodes a secondary opcode
// (REGIMM-style): opcode | rs | SF | imm16.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1121
1122
// Emits an I-type instruction with an FPU register in the ft field
// (e.g. LWC1/SWC1): opcode | rs | ft | imm16.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
1131
1132
// Emits an instruction with a signed 21-bit offset (R6 compact branches).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}
1139
1140
// Emits an instruction with an unsigned 21-bit offset field.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}
1147
1148
// Emits an instruction with a signed 26-bit offset (R6 BC/BALC).
void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}
1155
1156
// Emits a J-type instruction (J/JAL) with a 26-bit word address, keeping the
// trampoline pool out of the jump and its delay slot.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1165
1166
1167 // Returns the next free trampoline entry.
// Returns the next free trampoline entry.
// Returns kInvalidSlotPos (and latches internal_trampoline_exception_) when
// no slot is available or the trampoline lies before pos.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    // Only a trampoline emitted after pos can be branched to forward.
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}
1182
1183
// Returns the absolute address for a jump to L. For an unbound label, links
// the current position into L's chain and returns either the previous link
// (bits reused as chain pointer) or kEndOfJumpChain for a fresh chain.
uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);  // Jump targets must be word-aligned.

  return imm;
}
1204
1205
// Returns the branch offset (in bytes) to L for a branch with the given
// offset width. Unbound labels are linked into L's chain and kEndOfChain is
// returned for a fresh chain. `pad` accounts for the extra word inserted
// after a preceding compact branch.
int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      // A new unbound label shrinks the window before we must check whether
      // a trampoline pool is needed.
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));  // Offset must fit the branch encoding.
  DCHECK((offset & 3) == 0);          // Offset must be word-aligned.

  return offset;
}
1232
1233
// Stores a reference to label L at buffer position at_offset. Bound labels
// are written as a code-object-relative address; unbound labels are linked
// into L's chain with the chain offset packed into the low 16 bits.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      // Zero marks the end of the chain.
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1258
1259
1260 //------- Branch and jump instructions --------
1261
// Unconditional branch, encoded as beq zero_reg, zero_reg.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}
1265
1266
// Branch-and-link, encoded as bgezal zero_reg (always taken).
void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}
1270
1271
// R6 compact unconditional branch (26-bit offset, no delay slot).
void Assembler::bc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
1276
1277
// R6 compact branch-and-link (26-bit offset, no delay slot).
void Assembler::balc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
1282
1283
// Branch if rs == rt (has a delay slot).
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1289
1290
// Branch if rs >= 0 (has a delay slot).
void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1296
1297
// R6 compact branch if rt >= 0 (BLEZL opcode with rs == rt == nonzero).
void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1303
1304
// R6 compact branch if rs >= rt, unsigned (BLEZ opcode, rs != rt, both
// nonzero).
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1312
1313
// R6 compact branch if rs >= rt, signed (BLEZL opcode, rs != rt, both
// nonzero).
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1321
1322
// Branch-and-link if rs >= 0 (delay slot). On R6 only the rs == zero form
// (plain BAL) survives.
void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1329
1330
// Branch if rs > 0 (has a delay slot).
void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1336
1337
// R6 compact branch if rt > 0 (BGTZL opcode with rs == 0).
void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1344
1345
// Branch if rs <= 0 (has a delay slot).
void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1351
1352
// R6 compact branch if rt <= 0 (BLEZL opcode with rs == 0).
void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1359
1360
// R6 compact branch if rt < 0 (BGTZL opcode with rs == rt, nonzero).
void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!rt.is(zero_reg));
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1366
1367
// R6 compact branch if rs < rt, unsigned (BGTZ opcode, rs != rt, both
// nonzero).
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1375
1376
// R6 compact branch if rs < rt, signed (BGTZL opcode, rs != rt, both
// nonzero).
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!rs.is(zero_reg));
  DCHECK(!rt.is(zero_reg));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1384
1385
// Branch if rs < 0 (has a delay slot).
void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1391
1392
// Branch-and-link if rs < 0 (delay slot). Removed on R6 except rs == zero,
// where it is a never-taken NAL used for PC capture.
void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1399
1400
// Branch if rs != rt (has a delay slot).
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1406
1407
// R6 compact branch on signed-add overflow (ADDI opcode space). The encoding
// requires rs-field >= rt-field, so operands are swapped if needed (the
// condition is symmetric).
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1416
1417
// R6 compact branch on NO signed-add overflow (DADDI opcode space). Operand
// order normalized as in bovc.
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1426
1427
// R6 compact branch-and-link if rt <= 0 (BLEZ opcode with rs == 0).
void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1434
1435
// R6 compact branch-and-link if rt >= 0 (BLEZ opcode with rs == rt).
void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1441
1442
// Branch-likely-and-link if rs >= 0 (pre-R6 only; removed in R6).
void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1450
1451
// R6 compact branch-and-link if rt < 0 (BGTZ opcode with rs == rt).
void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1457
1458
// R6 compact branch-and-link if rt > 0 (BGTZ opcode with rs == 0).
void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1465
1466
// R6 compact branch-and-link if rt == 0 (ADDI opcode space with rs == 0).
void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1473
1474
// R6 compact branch-and-link if rt != 0 (DADDI opcode space with rs == 0).
void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1481
1482
// R6 compact branch if rs == rt (ADDI opcode space). The encoding requires
// rs-field < rt-field; operands are swapped if needed (condition symmetric).
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1492
1493
// R6 compact branch if rs == 0 (POP66 opcode, 21-bit offset).
void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1499
1500
// R6 compact branch if rs != rt (DADDI opcode space). Operand order
// normalized as in beqc.
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1510
1511
// R6 compact branch if rs != 0 (POP76 opcode, 21-bit offset).
void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1517
1518
j(int32_t target)1519 void Assembler::j(int32_t target) {
1520 #if DEBUG
1521 // Get pc of delay slot.
1522 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1523 bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1524 (kImm26Bits + kImmFieldShift)) == 0;
1525 DCHECK(in_range && ((target & 3) == 0));
1526 #endif
1527 BlockTrampolinePoolScope block_trampoline_pool(this);
1528 GenInstrJump(J, (target >> 2) & kImm26Mask);
1529 BlockTrampolinePoolFor(1); // For associated delay slot.
1530 }
1531
1532
// Jump to the address in rs. R6 removed JR, so it is encoded as
// jalr rs, zero_reg there.
void Assembler::jr(Register rs) {
  if (!IsMipsArchVariant(kMips32r6)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}
1542
1543
// Jump-and-link to target (JAL, 26-bit word address within the current
// 256MB region; has a delay slot).
void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  // The jump target must lie in the same 256MB-aligned region as the pc of
  // the delay slot (the high bits are taken from the pc).
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(JAL, (target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1556
1557
// Jump to the address in rs, storing the return address in rd.
void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());  // rd would be clobbered before the jump.
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1564
1565
// R6 jump-indexed-compact: jump to rt + offset (POP66 with rs == 0).
void Assembler::jic(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(POP66, zero_reg, rt, offset);
}
1570
1571
// R6 jump-indexed-and-link-compact: call rt + offset (POP76 with rs == 0).
void Assembler::jialc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(POP76, zero_reg, rt, offset);
}
1576
1577
1578 // -------Data-processing-instructions---------
1579
1580 // Arithmetic.
1581
// rd = rs + rt (no overflow trap).
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
1585
1586
// rd = rs + sign-extended 16-bit immediate j (no overflow trap).
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}
1590
1591
// rd = rs - rt (no overflow trap).
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
1595
1596
// rd = low 32 bits of rs * rt. Pre-R6 uses the SPECIAL2 MUL encoding;
// R6 uses the new SPECIAL MUL/MUH family.
void Assembler::mul(Register rd, Register rs, Register rt) {
  if (!IsMipsArchVariant(kMips32r6)) {
    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  } else {
    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  }
}
1604
1605
// R6: rd = low 32 bits of unsigned rs * rt.
void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}
1610
1611
// R6: rd = high 32 bits of signed rs * rt.
void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}
1616
1617
// R6: rd = high 32 bits of unsigned rs * rt.
void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}
1622
1623
// R6: rd = rs % rt (signed).
void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
1628
1629
// R6: rd = rs % rt (unsigned).
void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
1634
1635
// Signed multiply; 64-bit result goes to HI/LO.
void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
1639
1640
// Unsigned multiply; 64-bit result goes to HI/LO.
void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
1644
1645
// Signed divide; quotient to LO, remainder to HI.
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}
1649
1650
// R6 three-operand form: rd = rs / rt (signed).
void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}
1655
1656
// Unsigned divide; quotient to LO, remainder to HI.
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
1660
1661
// R6 three-operand form: rd = rs / rt (unsigned).
void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
1666
1667
1668 // Logical.
1669
// rd = rs & rt.
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}
1673
1674
// rt = rs & zero-extended 16-bit immediate j.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}
1679
1680
// rd = rs | rt.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}
1684
1685
// rt = rs | zero-extended 16-bit immediate j.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}
1690
1691
// rd = rs ^ rt.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}
1695
1696
// rt = rs ^ zero-extended 16-bit immediate j.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}
1701
1702
// rd = ~(rs | rt).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
1706
1707
1708 // Shifts.
// Shifts.
// rd = rt << sa (logical). sa is masked to 5 bits.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
1720
1721
// rd = rt << (rs & 0x1F) (variable logical shift left).
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}
1725
1726
// rd = rt >> sa (logical). sa is masked to 5 bits.
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}
1730
1731
// rd = rt >> (rs & 0x1F) (variable logical shift right).
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}
1735
1736
// rd = rt >> sa (arithmetic). sa is masked to 5 bits.
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}
1740
1741
// rd = rt >> (rs & 0x1F) (variable arithmetic shift right).
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
1745
1746
// rd = rt rotated right by sa. Encoded as SRL with bit 21 (rs field == 1)
// set; available on R2 and R6 only.
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}
1755
1756
// rd = rt rotated right by (rs & 0x1F). Encoded as SRLV with bit 6
// (sa field == 1) set; available on R2 and R6 only.
void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}
1765
1766
// R6 load-scaled-address: rd = rt + (rs << (sa + 1)); sa in [0, 3].
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa <= 3);
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | LSA;
  emit(instr);
}
1775
1776
1777 // ------------Memory-instructions-------------
1778
1779 // Helper for base-reg + offset, when offset is larger than int16.
// Helper for base-reg + offset, when offset is larger than int16.
// Materializes the full 32-bit effective address (base + offset) in AT.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));  // AT is about to be clobbered.
  lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}
1786
1787 // Helper for base-reg + upper part of offset, when offset is larger than int16.
1788 // Loads higher part of the offset to AT register.
1789 // Returns lower part of the offset to be used as offset
1790 // in Load/Store instructions
// Helper for base-reg + upper part of offset, when offset is larger than
// int16. Loads higher part of the offset to AT register. Returns lower part
// of the offset to be used as offset in Load/Store instructions.
int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));  // AT is about to be clobbered.
  int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
  // If the highest bit of the lower part of the offset is 1, this would make
  // the offset in the load/store instruction negative. We need to compensate
  // for this by adding 1 to the upper part of the offset.
  if (src.offset_ & kNegOffset) {
    hi += 1;
  }
  lui(at, hi);
  addu(at, at, src.rm());
  return (src.offset_ & kImm16Mask);
}
1804
1805 // Helper for loading base-reg + upper offset's part to AT reg when we are using
1806 // two 32-bit loads/stores instead of one 64-bit
// Helper for loading base-reg + upper offset's part to AT reg when we are
// using two 32-bit loads/stores instead of one 64-bit. The second access
// uses the returned offset + kIntSize, so that sum must also fit in 16 bits.
int32_t Assembler::LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  if (is_int16((src.offset_ & kImm16Mask) + kIntSize)) {
    // Only if lower part of offset + kIntSize fits in 16bits
    return LoadRegPlusUpperOffsetPartToAt(src);
  }
  // In case offset's lower part + kIntSize doesn't fit in 16bits,
  // load reg + hole offset to AT
  LoadRegPlusOffsetToAt(src);
  return 0;
}
1818
// Load sign-extended byte from rs into rd.
void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
    GenInstrImmediate(LB, at, rd, off16);
  }
}
1827
1828
// Load zero-extended byte from rs into rd.
void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
    GenInstrImmediate(LBU, at, rd, off16);
  }
}
1837
1838
lh(Register rd,const MemOperand & rs)1839 void Assembler::lh(Register rd, const MemOperand& rs) {
1840 if (is_int16(rs.offset_)) {
1841 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1842 } else { // Offset > 16 bits, use multiple instructions to load.
1843 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1844 GenInstrImmediate(LH, at, rd, off16);
1845 }
1846 }
1847
1848
lhu(Register rd,const MemOperand & rs)1849 void Assembler::lhu(Register rd, const MemOperand& rs) {
1850 if (is_int16(rs.offset_)) {
1851 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1852 } else { // Offset > 16 bits, use multiple instructions to load.
1853 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1854 GenInstrImmediate(LHU, at, rd, off16);
1855 }
1856 }
1857
1858
lw(Register rd,const MemOperand & rs)1859 void Assembler::lw(Register rd, const MemOperand& rs) {
1860 if (is_int16(rs.offset_)) {
1861 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1862 } else { // Offset > 16 bits, use multiple instructions to load.
1863 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1864 GenInstrImmediate(LW, at, rd, off16);
1865 }
1866 }
1867
1868
lwl(Register rd,const MemOperand & rs)1869 void Assembler::lwl(Register rd, const MemOperand& rs) {
1870 DCHECK(is_int16(rs.offset_));
1871 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1872 IsMipsArchVariant(kMips32r2));
1873 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1874 }
1875
1876
lwr(Register rd,const MemOperand & rs)1877 void Assembler::lwr(Register rd, const MemOperand& rs) {
1878 DCHECK(is_int16(rs.offset_));
1879 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1880 IsMipsArchVariant(kMips32r2));
1881 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1882 }
1883
1884
sb(Register rd,const MemOperand & rs)1885 void Assembler::sb(Register rd, const MemOperand& rs) {
1886 if (is_int16(rs.offset_)) {
1887 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1888 } else { // Offset > 16 bits, use multiple instructions to store.
1889 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1890 GenInstrImmediate(SB, at, rd, off16);
1891 }
1892 }
1893
1894
sh(Register rd,const MemOperand & rs)1895 void Assembler::sh(Register rd, const MemOperand& rs) {
1896 if (is_int16(rs.offset_)) {
1897 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1898 } else { // Offset > 16 bits, use multiple instructions to store.
1899 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1900 GenInstrImmediate(SH, at, rd, off16);
1901 }
1902 }
1903
1904
sw(Register rd,const MemOperand & rs)1905 void Assembler::sw(Register rd, const MemOperand& rs) {
1906 if (is_int16(rs.offset_)) {
1907 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1908 } else { // Offset > 16 bits, use multiple instructions to store.
1909 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1910 GenInstrImmediate(SW, at, rd, off16);
1911 }
1912 }
1913
1914
swl(Register rd,const MemOperand & rs)1915 void Assembler::swl(Register rd, const MemOperand& rs) {
1916 DCHECK(is_int16(rs.offset_));
1917 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1918 IsMipsArchVariant(kMips32r2));
1919 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1920 }
1921
1922
swr(Register rd,const MemOperand & rs)1923 void Assembler::swr(Register rd, const MemOperand& rs) {
1924 DCHECK(is_int16(rs.offset_));
1925 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1926 IsMipsArchVariant(kMips32r2));
1927 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1928 }
1929
1930
lui(Register rd,int32_t j)1931 void Assembler::lui(Register rd, int32_t j) {
1932 DCHECK(is_uint16(j));
1933 GenInstrImmediate(LUI, zero_reg, rd, j);
1934 }
1935
1936
aui(Register rt,Register rs,int32_t j)1937 void Assembler::aui(Register rt, Register rs, int32_t j) {
1938 // This instruction uses same opcode as 'lui'. The difference in encoding is
1939 // 'lui' has zero reg. for rs field.
1940 DCHECK(!(rs.is(zero_reg)));
1941 DCHECK(is_uint16(j));
1942 GenInstrImmediate(LUI, rs, rt, j);
1943 }
1944
1945 // ---------PC-Relative instructions-----------
1946
addiupc(Register rs,int32_t imm19)1947 void Assembler::addiupc(Register rs, int32_t imm19) {
1948 DCHECK(IsMipsArchVariant(kMips32r6));
1949 DCHECK(rs.is_valid() && is_int19(imm19));
1950 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
1951 GenInstrImmediate(PCREL, rs, imm21);
1952 }
1953
1954
lwpc(Register rs,int32_t offset19)1955 void Assembler::lwpc(Register rs, int32_t offset19) {
1956 DCHECK(IsMipsArchVariant(kMips32r6));
1957 DCHECK(rs.is_valid() && is_int19(offset19));
1958 uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
1959 GenInstrImmediate(PCREL, rs, imm21);
1960 }
1961
1962
auipc(Register rs,int16_t imm16)1963 void Assembler::auipc(Register rs, int16_t imm16) {
1964 DCHECK(IsMipsArchVariant(kMips32r6));
1965 DCHECK(rs.is_valid());
1966 uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
1967 GenInstrImmediate(PCREL, rs, imm21);
1968 }
1969
1970
aluipc(Register rs,int16_t imm16)1971 void Assembler::aluipc(Register rs, int16_t imm16) {
1972 DCHECK(IsMipsArchVariant(kMips32r6));
1973 DCHECK(rs.is_valid());
1974 uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
1975 GenInstrImmediate(PCREL, rs, imm21);
1976 }
1977
1978
1979 // -------------Misc-instructions--------------
1980
1981 // Break / Trap instructions.
break_(uint32_t code,bool break_as_stop)1982 void Assembler::break_(uint32_t code, bool break_as_stop) {
1983 DCHECK((code & ~0xfffff) == 0);
1984 // We need to invalidate breaks that could be stops as well because the
1985 // simulator expects a char pointer after the stop instruction.
1986 // See constants-mips.h for explanation.
1987 DCHECK((break_as_stop &&
1988 code <= kMaxStopCode &&
1989 code > kMaxWatchpointCode) ||
1990 (!break_as_stop &&
1991 (code > kMaxStopCode ||
1992 code <= kMaxWatchpointCode)));
1993 Instr break_instr = SPECIAL | BREAK | (code << 6);
1994 emit(break_instr);
1995 }
1996
1997
stop(const char * msg,uint32_t code)1998 void Assembler::stop(const char* msg, uint32_t code) {
1999 DCHECK(code > kMaxWatchpointCode);
2000 DCHECK(code <= kMaxStopCode);
2001 #if V8_HOST_ARCH_MIPS
2002 break_(0x54321);
2003 #else // V8_HOST_ARCH_MIPS
2004 BlockTrampolinePoolFor(2);
2005 // The Simulator will handle the stop instruction and get the message address.
2006 // On MIPS stop() is just a special kind of break_().
2007 break_(code, true);
2008 // Do not embed the message string address! We used to do this, but that
2009 // made snapshots created from position-independent executable builds
2010 // non-deterministic.
2011 // TODO(yangguo): remove this field entirely.
2012 nop();
2013 #endif
2014 }
2015
2016
tge(Register rs,Register rt,uint16_t code)2017 void Assembler::tge(Register rs, Register rt, uint16_t code) {
2018 DCHECK(is_uint10(code));
2019 Instr instr = SPECIAL | TGE | rs.code() << kRsShift
2020 | rt.code() << kRtShift | code << 6;
2021 emit(instr);
2022 }
2023
2024
tgeu(Register rs,Register rt,uint16_t code)2025 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
2026 DCHECK(is_uint10(code));
2027 Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
2028 | rt.code() << kRtShift | code << 6;
2029 emit(instr);
2030 }
2031
2032
tlt(Register rs,Register rt,uint16_t code)2033 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2034 DCHECK(is_uint10(code));
2035 Instr instr =
2036 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2037 emit(instr);
2038 }
2039
2040
tltu(Register rs,Register rt,uint16_t code)2041 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2042 DCHECK(is_uint10(code));
2043 Instr instr =
2044 SPECIAL | TLTU | rs.code() << kRsShift
2045 | rt.code() << kRtShift | code << 6;
2046 emit(instr);
2047 }
2048
2049
teq(Register rs,Register rt,uint16_t code)2050 void Assembler::teq(Register rs, Register rt, uint16_t code) {
2051 DCHECK(is_uint10(code));
2052 Instr instr =
2053 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2054 emit(instr);
2055 }
2056
2057
tne(Register rs,Register rt,uint16_t code)2058 void Assembler::tne(Register rs, Register rt, uint16_t code) {
2059 DCHECK(is_uint10(code));
2060 Instr instr =
2061 SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2062 emit(instr);
2063 }
2064
sync()2065 void Assembler::sync() {
2066 Instr sync_instr = SPECIAL | SYNC;
2067 emit(sync_instr);
2068 }
2069
2070 // Move from HI/LO register.
2071
mfhi(Register rd)2072 void Assembler::mfhi(Register rd) {
2073 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
2074 }
2075
2076
mflo(Register rd)2077 void Assembler::mflo(Register rd) {
2078 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
2079 }
2080
2081
2082 // Set on less than instructions.
slt(Register rd,Register rs,Register rt)2083 void Assembler::slt(Register rd, Register rs, Register rt) {
2084 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
2085 }
2086
2087
sltu(Register rd,Register rs,Register rt)2088 void Assembler::sltu(Register rd, Register rs, Register rt) {
2089 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
2090 }
2091
2092
slti(Register rt,Register rs,int32_t j)2093 void Assembler::slti(Register rt, Register rs, int32_t j) {
2094 GenInstrImmediate(SLTI, rs, rt, j);
2095 }
2096
2097
sltiu(Register rt,Register rs,int32_t j)2098 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
2099 GenInstrImmediate(SLTIU, rs, rt, j);
2100 }
2101
2102
2103 // Conditional move.
movz(Register rd,Register rs,Register rt)2104 void Assembler::movz(Register rd, Register rs, Register rt) {
2105 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
2106 }
2107
2108
movn(Register rd,Register rs,Register rt)2109 void Assembler::movn(Register rd, Register rs, Register rt) {
2110 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
2111 }
2112
2113
movt(Register rd,Register rs,uint16_t cc)2114 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
2115 Register rt;
2116 rt.reg_code = (cc & 0x0007) << 2 | 1;
2117 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2118 }
2119
2120
movf(Register rd,Register rs,uint16_t cc)2121 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
2122 Register rt;
2123 rt.reg_code = (cc & 0x0007) << 2 | 0;
2124 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2125 }
2126
2127
seleqz(Register rd,Register rs,Register rt)2128 void Assembler::seleqz(Register rd, Register rs, Register rt) {
2129 DCHECK(IsMipsArchVariant(kMips32r6));
2130 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
2131 }
2132
2133
2134 // Bit twiddling.
clz(Register rd,Register rs)2135 void Assembler::clz(Register rd, Register rs) {
2136 if (!IsMipsArchVariant(kMips32r6)) {
2137 // Clz instr requires same GPR number in 'rd' and 'rt' fields.
2138 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
2139 } else {
2140 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
2141 }
2142 }
2143
2144
ins_(Register rt,Register rs,uint16_t pos,uint16_t size)2145 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2146 // Should be called via MacroAssembler::Ins.
2147 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
2148 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2149 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
2150 }
2151
2152
ext_(Register rt,Register rs,uint16_t pos,uint16_t size)2153 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2154 // Should be called via MacroAssembler::Ext.
2155 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
2156 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2157 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
2158 }
2159
2160
bitswap(Register rd,Register rt)2161 void Assembler::bitswap(Register rd, Register rt) {
2162 DCHECK(IsMipsArchVariant(kMips32r6));
2163 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
2164 }
2165
2166
pref(int32_t hint,const MemOperand & rs)2167 void Assembler::pref(int32_t hint, const MemOperand& rs) {
2168 DCHECK(!IsMipsArchVariant(kLoongson));
2169 DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
2170 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
2171 | (rs.offset_);
2172 emit(instr);
2173 }
2174
2175
align(Register rd,Register rs,Register rt,uint8_t bp)2176 void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
2177 DCHECK(IsMipsArchVariant(kMips32r6));
2178 DCHECK(is_uint3(bp));
2179 uint16_t sa = (ALIGN << kBp2Bits) | bp;
2180 GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
2181 }
2182
2183 // Byte swap.
wsbh(Register rd,Register rt)2184 void Assembler::wsbh(Register rd, Register rt) {
2185 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2186 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
2187 }
2188
seh(Register rd,Register rt)2189 void Assembler::seh(Register rd, Register rt) {
2190 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2191 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
2192 }
2193
seb(Register rd,Register rt)2194 void Assembler::seb(Register rd, Register rt) {
2195 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2196 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
2197 }
2198
2199 // --------Coprocessor-instructions----------------
2200
2201 // Load, store, move.
lwc1(FPURegister fd,const MemOperand & src)2202 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2203 if (is_int16(src.offset_)) {
2204 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
2205 } else { // Offset > 16 bits, use multiple instructions to load.
2206 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
2207 GenInstrImmediate(LWC1, at, fd, off16);
2208 }
2209 }
2210
2211
ldc1(FPURegister fd,const MemOperand & src)2212 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
2213 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
2214 // load to two 32-bit loads.
2215 if (IsFp32Mode()) { // fp32 mode.
2216 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2217 GenInstrImmediate(LWC1, src.rm(), fd,
2218 src.offset_ + Register::kMantissaOffset);
2219 FPURegister nextfpreg;
2220 nextfpreg.setcode(fd.code() + 1);
2221 GenInstrImmediate(LWC1, src.rm(), nextfpreg,
2222 src.offset_ + Register::kExponentOffset);
2223 } else { // Offset > 16 bits, use multiple instructions to load.
2224 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
2225 GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
2226 FPURegister nextfpreg;
2227 nextfpreg.setcode(fd.code() + 1);
2228 GenInstrImmediate(LWC1, at, nextfpreg, off16 + Register::kExponentOffset);
2229 }
2230 } else {
2231 DCHECK(IsFp64Mode() || IsFpxxMode());
2232 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
2233 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2234 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2235 GenInstrImmediate(LWC1, src.rm(), fd,
2236 src.offset_ + Register::kMantissaOffset);
2237 GenInstrImmediate(LW, src.rm(), at,
2238 src.offset_ + Register::kExponentOffset);
2239 mthc1(at, fd);
2240 } else { // Offset > 16 bits, use multiple instructions to load.
2241 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
2242 GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
2243 GenInstrImmediate(LW, at, at, off16 + Register::kExponentOffset);
2244 mthc1(at, fd);
2245 }
2246 }
2247 }
2248
2249
swc1(FPURegister fd,const MemOperand & src)2250 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
2251 if (is_int16(src.offset_)) {
2252 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
2253 } else { // Offset > 16 bits, use multiple instructions to load.
2254 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
2255 GenInstrImmediate(SWC1, at, fd, off16);
2256 }
2257 }
2258
2259
sdc1(FPURegister fd,const MemOperand & src)2260 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
2261 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
2262 // store to two 32-bit stores.
2263 DCHECK(!src.rm().is(at));
2264 DCHECK(!src.rm().is(t8));
2265 if (IsFp32Mode()) { // fp32 mode.
2266 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2267 GenInstrImmediate(SWC1, src.rm(), fd,
2268 src.offset_ + Register::kMantissaOffset);
2269 FPURegister nextfpreg;
2270 nextfpreg.setcode(fd.code() + 1);
2271 GenInstrImmediate(SWC1, src.rm(), nextfpreg,
2272 src.offset_ + Register::kExponentOffset);
2273 } else { // Offset > 16 bits, use multiple instructions to load.
2274 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
2275 GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
2276 FPURegister nextfpreg;
2277 nextfpreg.setcode(fd.code() + 1);
2278 GenInstrImmediate(SWC1, at, nextfpreg, off16 + Register::kExponentOffset);
2279 }
2280 } else {
2281 DCHECK(IsFp64Mode() || IsFpxxMode());
2282 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
2283 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2284 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2285 GenInstrImmediate(SWC1, src.rm(), fd,
2286 src.offset_ + Register::kMantissaOffset);
2287 mfhc1(at, fd);
2288 GenInstrImmediate(SW, src.rm(), at,
2289 src.offset_ + Register::kExponentOffset);
2290 } else { // Offset > 16 bits, use multiple instructions to load.
2291 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
2292 GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
2293 mfhc1(t8, fd);
2294 GenInstrImmediate(SW, at, t8, off16 + Register::kExponentOffset);
2295 }
2296 }
2297 }
2298
2299
mtc1(Register rt,FPURegister fs)2300 void Assembler::mtc1(Register rt, FPURegister fs) {
2301 GenInstrRegister(COP1, MTC1, rt, fs, f0);
2302 }
2303
2304
mthc1(Register rt,FPURegister fs)2305 void Assembler::mthc1(Register rt, FPURegister fs) {
2306 GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2307 }
2308
2309
mfc1(Register rt,FPURegister fs)2310 void Assembler::mfc1(Register rt, FPURegister fs) {
2311 GenInstrRegister(COP1, MFC1, rt, fs, f0);
2312 }
2313
2314
mfhc1(Register rt,FPURegister fs)2315 void Assembler::mfhc1(Register rt, FPURegister fs) {
2316 GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2317 }
2318
2319
ctc1(Register rt,FPUControlRegister fs)2320 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
2321 GenInstrRegister(COP1, CTC1, rt, fs);
2322 }
2323
2324
cfc1(Register rt,FPUControlRegister fs)2325 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
2326 GenInstrRegister(COP1, CFC1, rt, fs);
2327 }
2328
2329
DoubleAsTwoUInt32(double d,uint32_t * lo,uint32_t * hi)2330 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2331 uint64_t i;
2332 memcpy(&i, &d, 8);
2333
2334 *lo = i & 0xffffffff;
2335 *hi = i >> 32;
2336 }
2337
2338
movn_s(FPURegister fd,FPURegister fs,Register rt)2339 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
2340 DCHECK(!IsMipsArchVariant(kMips32r6));
2341 GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
2342 }
2343
2344
movn_d(FPURegister fd,FPURegister fs,Register rt)2345 void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
2346 DCHECK(!IsMipsArchVariant(kMips32r6));
2347 GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
2348 }
2349
2350
sel(SecondaryField fmt,FPURegister fd,FPURegister fs,FPURegister ft)2351 void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
2352 FPURegister ft) {
2353 DCHECK(IsMipsArchVariant(kMips32r6));
2354 DCHECK((fmt == D) || (fmt == S));
2355
2356 GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
2357 }
2358
2359
sel_s(FPURegister fd,FPURegister fs,FPURegister ft)2360 void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2361 sel(S, fd, fs, ft);
2362 }
2363
2364
sel_d(FPURegister fd,FPURegister fs,FPURegister ft)2365 void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2366 sel(D, fd, fs, ft);
2367 }
2368
2369
seleqz(SecondaryField fmt,FPURegister fd,FPURegister fs,FPURegister ft)2370 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
2371 FPURegister ft) {
2372 DCHECK(IsMipsArchVariant(kMips32r6));
2373 DCHECK((fmt == D) || (fmt == S));
2374 GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
2375 }
2376
2377
selnez(Register rd,Register rs,Register rt)2378 void Assembler::selnez(Register rd, Register rs, Register rt) {
2379 DCHECK(IsMipsArchVariant(kMips32r6));
2380 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
2381 }
2382
2383
selnez(SecondaryField fmt,FPURegister fd,FPURegister fs,FPURegister ft)2384 void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
2385 FPURegister ft) {
2386 DCHECK(IsMipsArchVariant(kMips32r6));
2387 DCHECK((fmt == D) || (fmt == S));
2388 GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
2389 }
2390
2391
seleqz_d(FPURegister fd,FPURegister fs,FPURegister ft)2392 void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2393 seleqz(D, fd, fs, ft);
2394 }
2395
2396
seleqz_s(FPURegister fd,FPURegister fs,FPURegister ft)2397 void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2398 seleqz(S, fd, fs, ft);
2399 }
2400
2401
selnez_d(FPURegister fd,FPURegister fs,FPURegister ft)2402 void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2403 selnez(D, fd, fs, ft);
2404 }
2405
2406
selnez_s(FPURegister fd,FPURegister fs,FPURegister ft)2407 void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2408 selnez(S, fd, fs, ft);
2409 }
2410
2411
movz_s(FPURegister fd,FPURegister fs,Register rt)2412 void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
2413 DCHECK(!IsMipsArchVariant(kMips32r6));
2414 GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
2415 }
2416
2417
movz_d(FPURegister fd,FPURegister fs,Register rt)2418 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
2419 DCHECK(!IsMipsArchVariant(kMips32r6));
2420 GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
2421 }
2422
2423
movt_s(FPURegister fd,FPURegister fs,uint16_t cc)2424 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2425 DCHECK(!IsMipsArchVariant(kMips32r6));
2426 FPURegister ft;
2427 ft.reg_code = (cc & 0x0007) << 2 | 1;
2428 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2429 }
2430
2431
movt_d(FPURegister fd,FPURegister fs,uint16_t cc)2432 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2433 DCHECK(!IsMipsArchVariant(kMips32r6));
2434 FPURegister ft;
2435 ft.reg_code = (cc & 0x0007) << 2 | 1;
2436 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2437 }
2438
2439
movf_s(FPURegister fd,FPURegister fs,uint16_t cc)2440 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2441 DCHECK(!IsMipsArchVariant(kMips32r6));
2442 FPURegister ft;
2443 ft.reg_code = (cc & 0x0007) << 2 | 0;
2444 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2445 }
2446
2447
movf_d(FPURegister fd,FPURegister fs,uint16_t cc)2448 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2449 DCHECK(!IsMipsArchVariant(kMips32r6));
2450 FPURegister ft;
2451 ft.reg_code = (cc & 0x0007) << 2 | 0;
2452 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2453 }
2454
2455
2456 // Arithmetic.
2457
add_s(FPURegister fd,FPURegister fs,FPURegister ft)2458 void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2459 GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
2460 }
2461
2462
add_d(FPURegister fd,FPURegister fs,FPURegister ft)2463 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2464 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2465 }
2466
2467
sub_s(FPURegister fd,FPURegister fs,FPURegister ft)2468 void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2469 GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
2470 }
2471
2472
sub_d(FPURegister fd,FPURegister fs,FPURegister ft)2473 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2474 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2475 }
2476
2477
mul_s(FPURegister fd,FPURegister fs,FPURegister ft)2478 void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2479 GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
2480 }
2481
2482
mul_d(FPURegister fd,FPURegister fs,FPURegister ft)2483 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2484 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2485 }
2486
madd_s(FPURegister fd,FPURegister fr,FPURegister fs,FPURegister ft)2487 void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
2488 FPURegister ft) {
2489 DCHECK(IsMipsArchVariant(kMips32r2));
2490 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
2491 }
2492
madd_d(FPURegister fd,FPURegister fr,FPURegister fs,FPURegister ft)2493 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2494 FPURegister ft) {
2495 DCHECK(IsMipsArchVariant(kMips32r2));
2496 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
2497 }
2498
msub_s(FPURegister fd,FPURegister fr,FPURegister fs,FPURegister ft)2499 void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
2500 FPURegister ft) {
2501 DCHECK(IsMipsArchVariant(kMips32r2));
2502 GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
2503 }
2504
msub_d(FPURegister fd,FPURegister fr,FPURegister fs,FPURegister ft)2505 void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
2506 FPURegister ft) {
2507 DCHECK(IsMipsArchVariant(kMips32r2));
2508 GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
2509 }
2510
maddf_s(FPURegister fd,FPURegister fs,FPURegister ft)2511 void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2512 DCHECK(IsMipsArchVariant(kMips32r6));
2513 GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
2514 }
2515
maddf_d(FPURegister fd,FPURegister fs,FPURegister ft)2516 void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2517 DCHECK(IsMipsArchVariant(kMips32r6));
2518 GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
2519 }
2520
msubf_s(FPURegister fd,FPURegister fs,FPURegister ft)2521 void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2522 DCHECK(IsMipsArchVariant(kMips32r6));
2523 GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
2524 }
2525
msubf_d(FPURegister fd,FPURegister fs,FPURegister ft)2526 void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2527 DCHECK(IsMipsArchVariant(kMips32r6));
2528 GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
2529 }
2530
div_s(FPURegister fd,FPURegister fs,FPURegister ft)2531 void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2532 GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
2533 }
2534
2535
div_d(FPURegister fd,FPURegister fs,FPURegister ft)2536 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2537 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2538 }
2539
2540
abs_s(FPURegister fd,FPURegister fs)2541 void Assembler::abs_s(FPURegister fd, FPURegister fs) {
2542 GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
2543 }
2544
2545
abs_d(FPURegister fd,FPURegister fs)2546 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2547 GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2548 }
2549
2550
mov_d(FPURegister fd,FPURegister fs)2551 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2552 GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2553 }
2554
2555
mov_s(FPURegister fd,FPURegister fs)2556 void Assembler::mov_s(FPURegister fd, FPURegister fs) {
2557 GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
2558 }
2559
2560
neg_s(FPURegister fd,FPURegister fs)2561 void Assembler::neg_s(FPURegister fd, FPURegister fs) {
2562 GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
2563 }
2564
2565
neg_d(FPURegister fd,FPURegister fs)2566 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2567 GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2568 }
2569
2570
sqrt_s(FPURegister fd,FPURegister fs)2571 void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
2572 GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
2573 }
2574
2575
sqrt_d(FPURegister fd,FPURegister fs)2576 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2577 GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
2578 }
2579
2580
rsqrt_s(FPURegister fd,FPURegister fs)2581 void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
2582 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2583 GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
2584 }
2585
2586
rsqrt_d(FPURegister fd,FPURegister fs)2587 void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
2588 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2589 GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
2590 }
2591
2592
recip_d(FPURegister fd,FPURegister fs)2593 void Assembler::recip_d(FPURegister fd, FPURegister fs) {
2594 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2595 GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
2596 }
2597
2598
recip_s(FPURegister fd,FPURegister fs)2599 void Assembler::recip_s(FPURegister fd, FPURegister fs) {
2600 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2601 GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
2602 }
2603
2604
2605 // Conversions.
2606
cvt_w_s(FPURegister fd,FPURegister fs)2607 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2608 GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2609 }
2610
2611
cvt_w_d(FPURegister fd,FPURegister fs)2612 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2613 GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2614 }
2615
2616
trunc_w_s(FPURegister fd,FPURegister fs)2617 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2618 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2619 }
2620
2621
trunc_w_d(FPURegister fd,FPURegister fs)2622 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2623 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2624 }
2625
2626
round_w_s(FPURegister fd,FPURegister fs)2627 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2628 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2629 }
2630
2631
round_w_d(FPURegister fd,FPURegister fs)2632 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2633 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2634 }
2635
2636
floor_w_s(FPURegister fd,FPURegister fs)2637 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2638 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2639 }
2640
2641
floor_w_d(FPURegister fd,FPURegister fs)2642 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2643 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2644 }
2645
2646
ceil_w_s(FPURegister fd,FPURegister fs)2647 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2648 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2649 }
2650
2651
ceil_w_d(FPURegister fd,FPURegister fs)2652 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2653 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2654 }
2655
2656
rint_s(FPURegister fd,FPURegister fs)2657 void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
2658
2659
rint(SecondaryField fmt,FPURegister fd,FPURegister fs)2660 void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
2661 DCHECK(IsMipsArchVariant(kMips32r6));
2662 DCHECK((fmt == D) || (fmt == S));
2663 GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
2664 }
2665
2666
rint_d(FPURegister fd,FPURegister fs)2667 void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
2668
2669
// cvt.l.fmt: convert fs to a 64-bit integer in fd. 64-bit FPU results
// require r2/r6 and FP64 register mode, hence the DCHECKs.
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
2682
2683
// FP-to-64-bit-integer conversions with explicit rounding mode
// (<op>.l.<fmt> fd, fs). Like cvt.l.fmt above, these produce a 64-bit
// FPU result and therefore require r2/r6 with FP64 mode enabled.
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
2738
2739
// class.fmt: classify the FP value in fs, writing the class mask to fd.
// MIPSr6-only instruction.
void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}


void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
2750
2751
// Generic emitters for the MIPSr6 FP min/max family:
//   min.fmt / max.fmt   - minimum/maximum of fs and ft into fd.
//   mina.fmt / maxa.fmt - same, selecting by absolute value.
// fmt must be S or D; all four are MIPSr6-only.
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}


void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}


void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}


void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
2782
2783
// Format-specific convenience wrappers over the generic min/max emitters
// above; each just fixes the SecondaryField to S or D.
void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}


void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}


void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}


void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}


void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}


void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}


void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}


void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}
2822
2823
// Format conversions cvt.<dst>.<src> fd, fs. The conversions whose source
// is a 64-bit integer (L) require r2/r6 with FP64 mode, matching the
// 64-bit conversions above.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
2856
2857
// Conditions for >= MIPSr6.
// Emits CMP.cond.fmt: compares fs and ft and writes the boolean result
// (as an FPR mask) into fd. The fmt value must occupy only the rs field,
// enforced by the mask DCHECK; cond goes into the low function bits.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
                    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}
2867
2868
// NOTE: for the r6 CMP.cond.fmt encoding the single/double formats are
// selected by the W and L fmt field values respectively (intentionally
// different from the S/D values used by the pre-r6 c.cond.fmt below).
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}
2878
2879
// bc1eqz: MIPSr6 FP branch taken when bit 0 of ft is zero; offset is a
// signed 16-bit instruction offset packed into the immediate field.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}


// bc1nez: MIPSr6 FP branch taken when bit 0 of ft is non-zero.
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
2892
2893
// Conditions for < MIPSr6.
// Emits c.cond.fmt: compares fs and ft and stores the result in FP
// condition-code bit cc (0..7). The 3 << 4 bits mark the compare group
// of the function field; cond supplies the low condition bits.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
                  FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}
2904
2905
// Format-specific wrappers for the pre-r6 c.cond.fmt compare above.
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}


void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}
2916
2917
// Compares src1 against the constant 0.0 (the only supported value, per
// the DCHECK; the exact float equality test is deliberate) using the
// pre-r6 c.cond.fmt path with condition-code bit 0.
// NOTE(review): clobbers f14 as a scratch register to materialize 0.0.
void Assembler::fcmp(FPURegister src1, const double src2,
                     FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);       // Move integer zero into f14.
  cvt_d_w(f14, f14);         // Convert it to double 0.0.
  c(cond, D, src1, f14, 0);
}
2925
2926
// bc1f: pre-r6 FP branch taken when condition-code bit cc is false.
// cc occupies bits 20..18; the tf bit (16) is 0 for "branch on false".
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// bc1t: pre-r6 FP branch taken when condition-code bit cc is true
// (tf bit set to 1).
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
2939
2940
// Patches one internal reference at |pc| after the code buffer moved by
// |pc_delta| bytes. Two encodings are handled:
//  - INTERNAL_REFERENCE: a raw 32-bit pointer embedded in the code; the
//    pointer itself is adjusted in place.
//  - INTERNAL_REFERENCE_ENCODED: the address is split across a lui/ori or
//    (r6) lui/jic instruction pair; the immediates are re-assembled,
//    adjusted, and written back.
// Returns the number of instructions patched (0 if the slot holds an
// end-of-chain sentinel and needs no patching).
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  Instr instr = instr_at(pc);

  if (RelocInfo::IsInternalReference(rmode)) {
    int32_t* p = reinterpret_cast<int32_t*>(pc);
    if (*p == 0) {
      // Unset reference (end of a label chain): nothing to relocate.
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
    if (IsLui(instr)) {
      Instr instr1 = instr_at(pc + 0 * Assembler::kInstrSize);
      Instr instr2 = instr_at(pc + 1 * Assembler::kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      int32_t imm;
      if (IsJicOrJialc(instr2)) {
        // r6 lui/jic pair: the target is split in a non-trivial way and
        // must be recombined by the helper.
        imm = CreateTargetAddress(instr1, instr2);
      } else {
        // Classic lui/ori pair: high half in instr1, low half in instr2.
        imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
        imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
      }

      if (imm == kEndOfJumpChain) {
        return 0;  // Number of instructions patched.
      }
      imm += pc_delta;
      DCHECK((imm & 3) == 0);  // Targets are instruction-aligned.
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;

      if (IsJicOrJialc(instr2)) {
        uint32_t lui_offset_u, jic_offset_u;
        Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
        instr_at_put(pc + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
        instr_at_put(pc + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
      } else {
        instr_at_put(pc + 0 * Assembler::kInstrSize,
                     instr1 | ((imm >> kLuiShift) & kImm16Mask));
        instr_at_put(pc + 1 * Assembler::kInstrSize,
                     instr2 | (imm & kImm16Mask));
      }
      return 2;  // Number of instructions patched.
    } else {
      // Encoded references always start with lui.
      UNREACHABLE();
      return 0;
    }
  }
}
2992
2993
// Grows the code buffer when it is (nearly) full: allocates a larger
// buffer, moves the generated instructions (which grow from the start)
// and the reloc info (which grows backwards from the end), then patches
// every internal reference for the new base address. Externally supplied
// buffers cannot be grown.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: double up to 1 MB, then grow linearly by
  // 1 MB per step.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Instructions keep their offset from the start; reloc
  // info keeps its offset from the end, so the two deltas differ.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries: internal references embed absolute
  // addresses into the code and must be shifted by pc_delta.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
        rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}
3039
3040
// Raw data emitters (byte / 32-bit word / 64-bit word). Each first makes
// sure we are not emitting into a branch delay ("forbidden") slot.
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
3057
3058
// Emits the 32-bit address of |label| as data, recording an internal
// reference so the value is fixed up if the buffer moves. Bound labels
// emit their absolute buffer address directly; unbound labels emit a
// jump-chain entry to be resolved when the label is bound.
void Assembler::dd(Label* label) {
  uint32_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
3072
3073
// Records reloc info for the instruction about to be emitted at pc_.
// External references are skipped unless the heap will be serialized or
// debug code is enabled; CODE_TARGET_WITH_ID entries additionally carry
// the recorded AST id instead of |data|.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::COMMENT &&
      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
3100
3101
// Blocks trampoline pool emission for the next |instructions|
// instructions, first giving the pool a chance to be emitted now if it
// would otherwise become due inside the blocked region.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
3106
3107
// Emits the trampoline pool if it is due: one long-jump slot per branch to
// a still-unbound label, jumped over by a leading branch so execution
// falls straight through. The pool is emitted at most once; afterwards
// further checks are disabled via next_buffer_check_ = kMaxInt.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // Skip over the pool so straight-line execution is unaffected.
      if (IsMipsArchVariant(kMips32r6)) {
        bc(&after_pool);
      } else {
        b(&after_pool);
        nop();  // Branch delay slot.
      }

      int pool_start = pc_offset();
      if (IsMipsArchVariant(kMips32r6)) {
        // r6 slots: lui/jic pair per unbound label.
        for (int i = 0; i < unbound_labels_count_; i++) {
          uint32_t imm32;
          imm32 = jump_address(&after_pool);
          uint32_t lui_offset, jic_offset;
          UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
          {
            BlockGrowBufferScope block_buf_growth(this);
            // Buffer growth (and relocation) must be blocked for internal
            // references until associated instructions are emitted and
            // available to be patched.
            RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
            lui(at, lui_offset);
            jic(at, jic_offset);
          }
          CheckBuffer();
        }
      } else {
        // Pre-r6 slots: lui/ori to build the target, then jr + delay nop.
        for (int i = 0; i < unbound_labels_count_; i++) {
          uint32_t imm32;
          imm32 = jump_address(&after_pool);
          {
            BlockGrowBufferScope block_buf_growth(this);
            // Buffer growth (and relocation) must be blocked for internal
            // references until associated instructions are emitted and
            // available to be patched.
            RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
            lui(at, (imm32 & kHiMask) >> kLuiShift);
            ori(at, at, (imm32 & kImm16Mask));
          }
          CheckBuffer();
          jr(at);
          nop();  // Branch delay slot.
        }
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
3191
3192
target_address_at(Address pc)3193 Address Assembler::target_address_at(Address pc) {
3194 Instr instr1 = instr_at(pc);
3195 Instr instr2 = instr_at(pc + kInstrSize);
3196 // Interpret 2 instructions generated by li: lui/ori
3197 if (IsLui(instr1) && IsOri(instr2)) {
3198 // Assemble the 32 bit value.
3199 return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
3200 GetImmediate16(instr2));
3201 }
3202
3203 // We should never get here, force a bad address if we do.
3204 UNREACHABLE();
3205 return (Address)0x0;
3206 }
3207
3208
// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
// Overwrites the HeapNumber's value with this platform's quiet NaN.
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
3216
3217
// On Mips, a target address is stored in a lui/ori instruction pair, each
// of which load 16 bits of the 32-bit address to a register.
// Patching the address must replace both instr, and flush the i-cache.
// On r6, target address is stored in a lui/jic pair, and both instr have to be
// patched.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  Instr instr2 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRtField(instr2);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction, using instr pair.
  Instr instr1 = instr_at(pc);
  CHECK(IsLui(instr1) && (IsOri(instr2) || IsJicOrJialc(instr2)));
#endif

  if (IsJicOrJialc(instr2)) {
    // Must use 2 instructions to insure patchable code => use lui and jic
    uint32_t lui_offset, jic_offset;
    Assembler::UnpackTargetAddressUnsigned(itarget, lui_offset, jic_offset);

    // Clear the old immediates, then merge in the new split target.
    *p &= ~kImm16Mask;
    *(p + 1) &= ~kImm16Mask;

    *p |= lui_offset;
    *(p + 1) |= jic_offset;

  } else {
    // Must use 2 instructions to insure patchable code => just use lui and ori.
    // lui rt, upper-16.
    // ori rt rt, lower-16.
    *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
    *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
  }

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    // Both patched words must be flushed so the CPU fetches the new code.
    Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
  }
}
3264
3265 } // namespace internal
3266 } // namespace v8
3267
3268 #endif // V8_TARGET_ARCH_MIPS
3269