/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
#define ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_

#include <deque>
#include <utility>
#include <vector>

#include "arch/mips/instruction_set_features_mips.h"
#include "base/arena_containers.h"
#include "base/enums.h"
#include "base/macros.h"
#include "constants_mips.h"
#include "globals.h"
#include "managed_register_mips.h"
#include "offsets.h"
#include "utils/assembler.h"
#include "utils/jni_macro_assembler.h"
#include "utils/label.h"

namespace art {
namespace mips {

static constexpr size_t kMipsWordSize = 4;
static constexpr size_t kMipsDoublewordSize = 8;

enum LoadOperandType {
  kLoadSignedByte,
  kLoadUnsignedByte,
  kLoadSignedHalfword,
  kLoadUnsignedHalfword,
  kLoadWord,
  kLoadDoubleword
};

enum StoreOperandType {
  kStoreByte,
  kStoreHalfword,
  kStoreWord,
  kStoreDoubleword
};

// Used to test the values returned by ClassS/ClassD.
enum FPClassMaskType {
  kSignalingNaN      = 0x001,
  kQuietNaN          = 0x002,
  kNegativeInfinity  = 0x004,
  kNegativeNormal    = 0x008,
  kNegativeSubnormal = 0x010,
  kNegativeZero      = 0x020,
  kPositiveInfinity  = 0x040,
  kPositiveNormal    = 0x080,
  kPositiveSubnormal = 0x100,
  kPositiveZero      = 0x200,
};

class MipsLabel : public Label {
 public:
  MipsLabel() : prev_branch_id_plus_one_(0) {}

  MipsLabel(MipsLabel&& src)
      : Label(std::move(src)), prev_branch_id_plus_one_(src.prev_branch_id_plus_one_) {}

 private:
  uint32_t prev_branch_id_plus_one_;  // To get distance from preceding branch, if any.

  friend class MipsAssembler;
  DISALLOW_COPY_AND_ASSIGN(MipsLabel);
};

// Assembler literal is a value embedded in code, retrieved using a PC-relative load.
class Literal {
 public:
  static constexpr size_t kMaxSize = 8;

  Literal(uint32_t size, const uint8_t* data)
      : label_(), size_(size) {
    DCHECK_LE(size, Literal::kMaxSize);
    memcpy(data_, data, size);
  }

  template <typename T>
  T GetValue() const {
    DCHECK_EQ(size_, sizeof(T));
    T value;
    memcpy(&value, data_, sizeof(T));
    return value;
  }

  uint32_t GetSize() const {
    return size_;
  }

  const uint8_t* GetData() const {
    return data_;
  }

  MipsLabel* GetLabel() {
    return &label_;
  }

  const MipsLabel* GetLabel() const {
    return &label_;
  }

 private:
  MipsLabel label_;
  const uint32_t size_;
  uint8_t data_[kMaxSize];

  DISALLOW_COPY_AND_ASSIGN(Literal);
};
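
// A minimal usage sketch (not part of the original API documentation; register
// choices are illustrative only): literals are typically created through
// MipsAssembler::NewLiteral<T>() and loaded with MipsAssembler::LoadLiteral(),
// both declared further below in this header.
//
//   Literal* lit = assembler.NewLiteral<int32_t>(0x12345678);
//   assembler.LoadLiteral(V0, ZERO, lit);  // R6: PC-relative load, base must be ZERO.
//   assembler.LoadLiteral(V0, V1, lit);    // R2: load relative to a previously set up base.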

// Jump table: table of labels emitted after the literals. Similar to literals.
class JumpTable {
 public:
  explicit JumpTable(std::vector<MipsLabel*>&& labels)
      : label_(), labels_(std::move(labels)) {
  }

  uint32_t GetSize() const {
    return static_cast<uint32_t>(labels_.size()) * sizeof(uint32_t);
  }

  const std::vector<MipsLabel*>& GetData() const {
    return labels_;
  }

  MipsLabel* GetLabel() {
    return &label_;
  }

  const MipsLabel* GetLabel() const {
    return &label_;
  }

 private:
  MipsLabel label_;
  std::vector<MipsLabel*> labels_;

  DISALLOW_COPY_AND_ASSIGN(JumpTable);
};

// Slowpath entered when Thread::Current()->_exception is non-null.
class MipsExceptionSlowPath {
 public:
  explicit MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust)
      : scratch_(scratch), stack_adjust_(stack_adjust) {}

  MipsExceptionSlowPath(MipsExceptionSlowPath&& src)
      : scratch_(src.scratch_),
        stack_adjust_(src.stack_adjust_),
        exception_entry_(std::move(src.exception_entry_)) {}

 private:
  MipsLabel* Entry() { return &exception_entry_; }
  const MipsManagedRegister scratch_;
  const size_t stack_adjust_;
  MipsLabel exception_entry_;

  friend class MipsAssembler;
  DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
};

class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
 public:
  using JNIBase = JNIMacroAssembler<PointerSize::k32>;

  explicit MipsAssembler(ArenaAllocator* arena,
                         const MipsInstructionSetFeatures* instruction_set_features = nullptr)
      : Assembler(arena),
        overwriting_(false),
        overwrite_location_(0),
        reordering_(true),
        ds_fsm_state_(kExpectingLabel),
        ds_fsm_target_pc_(0),
        literals_(arena->Adapter(kArenaAllocAssembler)),
        jump_tables_(arena->Adapter(kArenaAllocAssembler)),
        last_position_adjustment_(0),
        last_old_position_(0),
        last_branch_id_(0),
        isa_features_(instruction_set_features) {
    cfi().DelayEmittingAdvancePCs();
  }

  size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
  size_t CodePosition() OVERRIDE;
  DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }

  virtual ~MipsAssembler() {
    for (auto& branch : branches_) {
      CHECK(branch.IsResolved());
    }
  }
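
  // A minimal end-to-end usage sketch (not part of the original documentation; the
  // allocator, ISA features and register choices are illustrative only):
  //
  //   MipsAssembler assembler(&arena, isa_features);
  //   assembler.Addu(V0, A0, A1);           // emit instructions
  //   assembler.Jr(RA);                     // delay slot filled automatically when reordering
  //   assembler.NopIfNoReordering();        //   is enabled; otherwise this emits the NOP
  //   assembler.FinalizeCode();             // emit literals/tables, promote branches
  //   std::vector<uint8_t> code(assembler.CodeSize());
  //   MemoryRegion region(code.data(), code.size());
  //   assembler.FinalizeInstructions(region);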

  // Emit Machine Instructions.
  void Addu(Register rd, Register rs, Register rt);
  void Addiu(Register rt, Register rs, uint16_t imm16);
  void Subu(Register rd, Register rs, Register rt);

  void MultR2(Register rs, Register rt);  // R2
  void MultuR2(Register rs, Register rt);  // R2
  void DivR2(Register rs, Register rt);  // R2
  void DivuR2(Register rs, Register rt);  // R2
  void MulR2(Register rd, Register rs, Register rt);  // R2
  void DivR2(Register rd, Register rs, Register rt);  // R2
  void ModR2(Register rd, Register rs, Register rt);  // R2
  void DivuR2(Register rd, Register rs, Register rt);  // R2
  void ModuR2(Register rd, Register rs, Register rt);  // R2
  void MulR6(Register rd, Register rs, Register rt);  // R6
  void MuhR6(Register rd, Register rs, Register rt);  // R6
  void MuhuR6(Register rd, Register rs, Register rt);  // R6
  void DivR6(Register rd, Register rs, Register rt);  // R6
  void ModR6(Register rd, Register rs, Register rt);  // R6
  void DivuR6(Register rd, Register rs, Register rt);  // R6
  void ModuR6(Register rd, Register rs, Register rt);  // R6

  void And(Register rd, Register rs, Register rt);
  void Andi(Register rt, Register rs, uint16_t imm16);
  void Or(Register rd, Register rs, Register rt);
  void Ori(Register rt, Register rs, uint16_t imm16);
  void Xor(Register rd, Register rs, Register rt);
  void Xori(Register rt, Register rs, uint16_t imm16);
  void Nor(Register rd, Register rs, Register rt);

  void Movz(Register rd, Register rs, Register rt);  // R2
  void Movn(Register rd, Register rs, Register rt);  // R2
  void Seleqz(Register rd, Register rs, Register rt);  // R6
  void Selnez(Register rd, Register rs, Register rt);  // R6
  void ClzR6(Register rd, Register rs);
  void ClzR2(Register rd, Register rs);
  void CloR6(Register rd, Register rs);
  void CloR2(Register rd, Register rs);

  void Seb(Register rd, Register rt);  // R2+
  void Seh(Register rd, Register rt);  // R2+
  void Wsbh(Register rd, Register rt);  // R2+
  void Bitswap(Register rd, Register rt);  // R6

  void Sll(Register rd, Register rt, int shamt);
  void Srl(Register rd, Register rt, int shamt);
  void Rotr(Register rd, Register rt, int shamt);  // R2+
  void Sra(Register rd, Register rt, int shamt);
  void Sllv(Register rd, Register rt, Register rs);
  void Srlv(Register rd, Register rt, Register rs);
  void Rotrv(Register rd, Register rt, Register rs);  // R2+
  void Srav(Register rd, Register rt, Register rs);
  void Ext(Register rd, Register rt, int pos, int size);  // R2+
  void Ins(Register rd, Register rt, int pos, int size);  // R2+
  void Lsa(Register rd, Register rs, Register rt, int saPlusOne);  // R6
  void ShiftAndAdd(Register dst, Register src_idx, Register src_base, int shamt, Register tmp = AT);

  void Lb(Register rt, Register rs, uint16_t imm16);
  void Lh(Register rt, Register rs, uint16_t imm16);
  void Lw(Register rt, Register rs, uint16_t imm16);
  void Lwl(Register rt, Register rs, uint16_t imm16);
  void Lwr(Register rt, Register rs, uint16_t imm16);
  void Lbu(Register rt, Register rs, uint16_t imm16);
  void Lhu(Register rt, Register rs, uint16_t imm16);
  void Lwpc(Register rs, uint32_t imm19);  // R6
  void Lui(Register rt, uint16_t imm16);
  void Aui(Register rt, Register rs, uint16_t imm16);  // R6
  void Sync(uint32_t stype);
  void Mfhi(Register rd);  // R2
  void Mflo(Register rd);  // R2

  void Sb(Register rt, Register rs, uint16_t imm16);
  void Sh(Register rt, Register rs, uint16_t imm16);
  void Sw(Register rt, Register rs, uint16_t imm16);
  void Swl(Register rt, Register rs, uint16_t imm16);
  void Swr(Register rt, Register rs, uint16_t imm16);

  void LlR2(Register rt, Register base, int16_t imm16 = 0);
  void ScR2(Register rt, Register base, int16_t imm16 = 0);
  void LlR6(Register rt, Register base, int16_t imm9 = 0);
  void ScR6(Register rt, Register base, int16_t imm9 = 0);

  void Slt(Register rd, Register rs, Register rt);
  void Sltu(Register rd, Register rs, Register rt);
  void Slti(Register rt, Register rs, uint16_t imm16);
  void Sltiu(Register rt, Register rs, uint16_t imm16);

  // Branches and jumps to immediate offsets/addresses do not take care of their
  // delay/forbidden slots and generally should not be used directly. This applies
  // to the following R2 and R6 branch/jump instructions with imm16, imm21, addr26
  // offsets/addresses.
  // Use branches/jumps to labels instead.
  void B(uint16_t imm16);
  void Bal(uint16_t imm16);
  void Beq(Register rs, Register rt, uint16_t imm16);
  void Bne(Register rs, Register rt, uint16_t imm16);
  void Beqz(Register rt, uint16_t imm16);
  void Bnez(Register rt, uint16_t imm16);
  void Bltz(Register rt, uint16_t imm16);
  void Bgez(Register rt, uint16_t imm16);
  void Blez(Register rt, uint16_t imm16);
  void Bgtz(Register rt, uint16_t imm16);
  void Bc1f(uint16_t imm16);  // R2
  void Bc1f(int cc, uint16_t imm16);  // R2
  void Bc1t(uint16_t imm16);  // R2
  void Bc1t(int cc, uint16_t imm16);  // R2
  void J(uint32_t addr26);
  void Jal(uint32_t addr26);
  // Jalr() and Jr() fill their delay slots when reordering is enabled.
  // When reordering is disabled, the delay slots must be filled manually.
  // You may use NopIfNoReordering() to fill them when reordering is disabled.
  void Jalr(Register rd, Register rs);
  void Jalr(Register rs);
  void Jr(Register rs);
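
  // A usage sketch for label-based branches and delay-slot handling (not part of the
  // original documentation; registers and labels are illustrative only):
  //
  //   MipsLabel done;
  //   assembler.Beqz(A0, &done);                 // delay/forbidden slot handled by the assembler
  //   assembler.Addiu(V0, V0, 1);
  //   assembler.Bind(&done);
  //
  //   bool saved = assembler.SetReorder(false);  // disable automatic delay slot filling
  //   assembler.Jr(RA);
  //   assembler.NopIfNoReordering();             // fill the delay slot manually
  //   assembler.SetReorder(saved);
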
  // Nal() does not fill its delay slot. It must be filled manually.
  void Nal();
  void Auipc(Register rs, uint16_t imm16);  // R6
  void Addiupc(Register rs, uint32_t imm19);  // R6
  void Bc(uint32_t imm26);  // R6
  void Balc(uint32_t imm26);  // R6
  void Jic(Register rt, uint16_t imm16);  // R6
  void Jialc(Register rt, uint16_t imm16);  // R6
  void Bltc(Register rs, Register rt, uint16_t imm16);  // R6
  void Bltzc(Register rt, uint16_t imm16);  // R6
  void Bgtzc(Register rt, uint16_t imm16);  // R6
  void Bgec(Register rs, Register rt, uint16_t imm16);  // R6
  void Bgezc(Register rt, uint16_t imm16);  // R6
  void Blezc(Register rt, uint16_t imm16);  // R6
  void Bltuc(Register rs, Register rt, uint16_t imm16);  // R6
  void Bgeuc(Register rs, Register rt, uint16_t imm16);  // R6
  void Beqc(Register rs, Register rt, uint16_t imm16);  // R6
  void Bnec(Register rs, Register rt, uint16_t imm16);  // R6
  void Beqzc(Register rs, uint32_t imm21);  // R6
  void Bnezc(Register rs, uint32_t imm21);  // R6
  void Bc1eqz(FRegister ft, uint16_t imm16);  // R6
  void Bc1nez(FRegister ft, uint16_t imm16);  // R6

  void AddS(FRegister fd, FRegister fs, FRegister ft);
  void SubS(FRegister fd, FRegister fs, FRegister ft);
  void MulS(FRegister fd, FRegister fs, FRegister ft);
  void DivS(FRegister fd, FRegister fs, FRegister ft);
  void AddD(FRegister fd, FRegister fs, FRegister ft);
  void SubD(FRegister fd, FRegister fs, FRegister ft);
  void MulD(FRegister fd, FRegister fs, FRegister ft);
  void DivD(FRegister fd, FRegister fs, FRegister ft);
  void SqrtS(FRegister fd, FRegister fs);
  void SqrtD(FRegister fd, FRegister fs);
  void AbsS(FRegister fd, FRegister fs);
  void AbsD(FRegister fd, FRegister fs);
  void MovS(FRegister fd, FRegister fs);
  void MovD(FRegister fd, FRegister fs);
  void NegS(FRegister fd, FRegister fs);
  void NegD(FRegister fd, FRegister fs);

  void CunS(FRegister fs, FRegister ft);  // R2
  void CunS(int cc, FRegister fs, FRegister ft);  // R2
  void CeqS(FRegister fs, FRegister ft);  // R2
  void CeqS(int cc, FRegister fs, FRegister ft);  // R2
  void CueqS(FRegister fs, FRegister ft);  // R2
  void CueqS(int cc, FRegister fs, FRegister ft);  // R2
  void ColtS(FRegister fs, FRegister ft);  // R2
  void ColtS(int cc, FRegister fs, FRegister ft);  // R2
  void CultS(FRegister fs, FRegister ft);  // R2
  void CultS(int cc, FRegister fs, FRegister ft);  // R2
  void ColeS(FRegister fs, FRegister ft);  // R2
  void ColeS(int cc, FRegister fs, FRegister ft);  // R2
  void CuleS(FRegister fs, FRegister ft);  // R2
  void CuleS(int cc, FRegister fs, FRegister ft);  // R2
  void CunD(FRegister fs, FRegister ft);  // R2
  void CunD(int cc, FRegister fs, FRegister ft);  // R2
  void CeqD(FRegister fs, FRegister ft);  // R2
  void CeqD(int cc, FRegister fs, FRegister ft);  // R2
  void CueqD(FRegister fs, FRegister ft);  // R2
  void CueqD(int cc, FRegister fs, FRegister ft);  // R2
  void ColtD(FRegister fs, FRegister ft);  // R2
  void ColtD(int cc, FRegister fs, FRegister ft);  // R2
  void CultD(FRegister fs, FRegister ft);  // R2
  void CultD(int cc, FRegister fs, FRegister ft);  // R2
  void ColeD(FRegister fs, FRegister ft);  // R2
  void ColeD(int cc, FRegister fs, FRegister ft);  // R2
  void CuleD(FRegister fs, FRegister ft);  // R2
  void CuleD(int cc, FRegister fs, FRegister ft);  // R2
  void CmpUnS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpEqS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpUeqS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpLtS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpUltS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpLeS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpUleS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpOrS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpUneS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpNeS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpUnD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpEqD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpUeqD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpLtD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpUltD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpLeD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpUleD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpOrD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpUneD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void CmpNeD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void Movf(Register rd, Register rs, int cc = 0);  // R2
  void Movt(Register rd, Register rs, int cc = 0);  // R2
  void MovfS(FRegister fd, FRegister fs, int cc = 0);  // R2
  void MovfD(FRegister fd, FRegister fs, int cc = 0);  // R2
  void MovtS(FRegister fd, FRegister fs, int cc = 0);  // R2
  void MovtD(FRegister fd, FRegister fs, int cc = 0);  // R2
  void MovzS(FRegister fd, FRegister fs, Register rt);  // R2
  void MovzD(FRegister fd, FRegister fs, Register rt);  // R2
  void MovnS(FRegister fd, FRegister fs, Register rt);  // R2
  void MovnD(FRegister fd, FRegister fs, Register rt);  // R2
  void SelS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void SelD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void SeleqzS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void SeleqzD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void SelnezS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void SelnezD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void ClassS(FRegister fd, FRegister fs);  // R6
  void ClassD(FRegister fd, FRegister fs);  // R6
  void MinS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void MinD(FRegister fd, FRegister fs, FRegister ft);  // R6
  void MaxS(FRegister fd, FRegister fs, FRegister ft);  // R6
  void MaxD(FRegister fd, FRegister fs, FRegister ft);  // R6

  void TruncLS(FRegister fd, FRegister fs);  // R2+, FR=1
  void TruncLD(FRegister fd, FRegister fs);  // R2+, FR=1
  void TruncWS(FRegister fd, FRegister fs);
  void TruncWD(FRegister fd, FRegister fs);
  void Cvtsw(FRegister fd, FRegister fs);
  void Cvtdw(FRegister fd, FRegister fs);
  void Cvtsd(FRegister fd, FRegister fs);
  void Cvtds(FRegister fd, FRegister fs);
  void Cvtsl(FRegister fd, FRegister fs);  // R2+, FR=1
  void Cvtdl(FRegister fd, FRegister fs);  // R2+, FR=1
  void FloorWS(FRegister fd, FRegister fs);
  void FloorWD(FRegister fd, FRegister fs);

  void Mfc1(Register rt, FRegister fs);
  void Mtc1(Register rt, FRegister fs);
  void Mfhc1(Register rt, FRegister fs);
  void Mthc1(Register rt, FRegister fs);
  void MoveFromFpuHigh(Register rt, FRegister fs);
  void MoveToFpuHigh(Register rt, FRegister fs);
  void Lwc1(FRegister ft, Register rs, uint16_t imm16);
  void Ldc1(FRegister ft, Register rs, uint16_t imm16);
  void Swc1(FRegister ft, Register rs, uint16_t imm16);
  void Sdc1(FRegister ft, Register rs, uint16_t imm16);

  void Break();
  void Nop();
  void NopIfNoReordering();
  void Move(Register rd, Register rs);
  void Clear(Register rd);
  void Not(Register rd, Register rs);

  // Higher level composite instructions.
  void LoadConst32(Register rd, int32_t value);
  void LoadConst64(Register reg_hi, Register reg_lo, int64_t value);
  void LoadDConst64(FRegister rd, int64_t value, Register temp);
  void LoadSConst32(FRegister r, int32_t value, Register temp);
  void Addiu32(Register rt, Register rs, int32_t value, Register rtmp = AT);

  // These will generate R2 branches or R6 branches as appropriate and take care of
  // the delay/forbidden slots.
  void Bind(MipsLabel* label);
  void B(MipsLabel* label);
  void Bal(MipsLabel* label);
  void Beq(Register rs, Register rt, MipsLabel* label);
  void Bne(Register rs, Register rt, MipsLabel* label);
  void Beqz(Register rt, MipsLabel* label);
  void Bnez(Register rt, MipsLabel* label);
  void Bltz(Register rt, MipsLabel* label);
  void Bgez(Register rt, MipsLabel* label);
  void Blez(Register rt, MipsLabel* label);
  void Bgtz(Register rt, MipsLabel* label);
  void Blt(Register rs, Register rt, MipsLabel* label);
  void Bge(Register rs, Register rt, MipsLabel* label);
  void Bltu(Register rs, Register rt, MipsLabel* label);
  void Bgeu(Register rs, Register rt, MipsLabel* label);
  void Bc1f(MipsLabel* label);  // R2
  void Bc1f(int cc, MipsLabel* label);  // R2
  void Bc1t(MipsLabel* label);  // R2
  void Bc1t(int cc, MipsLabel* label);  // R2
  void Bc1eqz(FRegister ft, MipsLabel* label);  // R6
  void Bc1nez(FRegister ft, MipsLabel* label);  // R6

  void EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size);
  void AdjustBaseAndOffset(Register& base,
                           int32_t& offset,
                           bool is_doubleword,
                           bool is_float = false);

 private:
  // This will be used as an argument for loads/stores
  // when there is no need for implicit null checks.
  struct NoImplicitNullChecker {
    void operator()() const {}
  };

 public:
  template <typename ImplicitNullChecker = NoImplicitNullChecker>
  void StoreConstToOffset(StoreOperandType type,
                          int64_t value,
                          Register base,
                          int32_t offset,
                          Register temp,
                          ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
    // We permit `base` and `temp` to coincide (however, we check that neither is AT),
    // in which case the `base` register may be overwritten in the process.
    CHECK_NE(temp, AT);  // Must not use AT as temp, so as not to overwrite the adjusted base.
    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
    uint32_t low = Low32Bits(value);
    uint32_t high = High32Bits(value);
    Register reg;
    // If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp`
    // to load and hold the value but we can use AT instead as AT hasn't been used yet.
    // Otherwise, `temp` can be used for the value. And if `temp` is the same as the
    // original `base` (that is, `base` prior to the adjustment), the original `base`
    // register will be overwritten.
    if (base == temp) {
      temp = AT;
    }
    if (low == 0) {
      reg = ZERO;
    } else {
      reg = temp;
      LoadConst32(reg, low);
    }
    switch (type) {
      case kStoreByte:
        Sb(reg, base, offset);
        break;
      case kStoreHalfword:
        Sh(reg, base, offset);
        break;
      case kStoreWord:
        Sw(reg, base, offset);
        break;
      case kStoreDoubleword:
        Sw(reg, base, offset);
        null_checker();
        if (high == 0) {
          reg = ZERO;
        } else {
          reg = temp;
          if (high != low) {
            LoadConst32(reg, high);
          }
        }
        Sw(reg, base, offset + kMipsWordSize);
        break;
      default:
        LOG(FATAL) << "UNREACHABLE";
    }
    if (type != kStoreDoubleword) {
      null_checker();
    }
  }

  template <typename ImplicitNullChecker = NoImplicitNullChecker>
  void LoadFromOffset(LoadOperandType type,
                      Register reg,
                      Register base,
                      int32_t offset,
                      ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
    switch (type) {
      case kLoadSignedByte:
        Lb(reg, base, offset);
        break;
      case kLoadUnsignedByte:
        Lbu(reg, base, offset);
        break;
      case kLoadSignedHalfword:
        Lh(reg, base, offset);
        break;
      case kLoadUnsignedHalfword:
        Lhu(reg, base, offset);
        break;
      case kLoadWord:
        Lw(reg, base, offset);
        break;
      case kLoadDoubleword:
        if (reg == base) {
          // This will clobber the base when loading the lower register. Since we have to load the
          // higher register as well, this will fail. Solution: reverse the order.
          Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
          null_checker();
          Lw(reg, base, offset);
        } else {
          Lw(reg, base, offset);
          null_checker();
          Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
        }
        break;
      default:
        LOG(FATAL) << "UNREACHABLE";
    }
    if (type != kLoadDoubleword) {
      null_checker();
    }
  }

  template <typename ImplicitNullChecker = NoImplicitNullChecker>
  void LoadSFromOffset(FRegister reg,
                       Register base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
    AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
    Lwc1(reg, base, offset);
    null_checker();
  }

  template <typename ImplicitNullChecker = NoImplicitNullChecker>
  void LoadDFromOffset(FRegister reg,
                       Register base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
    AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
    if (IsAligned<kMipsDoublewordSize>(offset)) {
      Ldc1(reg, base, offset);
      null_checker();
    } else {
      if (Is32BitFPU()) {
        Lwc1(reg, base, offset);
        null_checker();
        Lwc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
      } else {
        // 64-bit FPU.
        Lwc1(reg, base, offset);
        null_checker();
        Lw(T8, base, offset + kMipsWordSize);
        Mthc1(T8, reg);
      }
    }
  }

  template <typename ImplicitNullChecker = NoImplicitNullChecker>
  void StoreToOffset(StoreOperandType type,
                     Register reg,
                     Register base,
                     int32_t offset,
                     ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
    // Must not use AT as `reg`, so as not to overwrite the value being stored
    // with the adjusted `base`.
    CHECK_NE(reg, AT);
    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
    switch (type) {
      case kStoreByte:
        Sb(reg, base, offset);
        break;
      case kStoreHalfword:
        Sh(reg, base, offset);
        break;
      case kStoreWord:
        Sw(reg, base, offset);
        break;
      case kStoreDoubleword:
        CHECK_NE(reg, base);
        CHECK_NE(static_cast<Register>(reg + 1), base);
        Sw(reg, base, offset);
        null_checker();
        Sw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
        break;
      default:
        LOG(FATAL) << "UNREACHABLE";
    }
    if (type != kStoreDoubleword) {
      null_checker();
    }
  }

  template <typename ImplicitNullChecker = NoImplicitNullChecker>
  void StoreSToOffset(FRegister reg,
                      Register base,
                      int32_t offset,
                      ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
    AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
    Swc1(reg, base, offset);
    null_checker();
  }

  template <typename ImplicitNullChecker = NoImplicitNullChecker>
  void StoreDToOffset(FRegister reg,
                      Register base,
                      int32_t offset,
                      ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
    AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
    if (IsAligned<kMipsDoublewordSize>(offset)) {
      Sdc1(reg, base, offset);
      null_checker();
    } else {
      if (Is32BitFPU()) {
        Swc1(reg, base, offset);
        null_checker();
        Swc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
      } else {
        // 64-bit FPU.
        Mfhc1(T8, reg);
        Swc1(reg, base, offset);
        null_checker();
        Sw(T8, base, offset + kMipsWordSize);
      }
    }
  }

  void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset);
  void LoadSFromOffset(FRegister reg, Register base, int32_t offset);
  void LoadDFromOffset(FRegister reg, Register base, int32_t offset);
  void StoreToOffset(StoreOperandType type, Register reg, Register base, int32_t offset);
  void StoreSToOffset(FRegister reg, Register base, int32_t offset);
  void StoreDToOffset(FRegister reg, Register base, int32_t offset);

  // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
  void Emit(uint32_t value);

  // Push/pop composite routines.
  void Push(Register rs);
  void Pop(Register rd);
  void PopAndReturn(Register rd, Register rt);

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `src` and store it in `dst`.
  void PoisonHeapReference(Register dst, Register src) {
    // dst = -src.
    Subu(dst, ZERO, src);
  }
  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(Register reg) {
    // reg = -reg.
    PoisonHeapReference(reg, reg);
  }
  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(Register reg) {
    // reg = -reg.
    Subu(reg, ZERO, reg);
  }
  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(Register reg) {
    if (kPoisonHeapReferences) {
      PoisonHeapReference(reg);
    }
  }
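
  // Illustrative note (not original documentation): poisoning is its own inverse here,
  // since both directions compute the two's-complement negation of the reference. With
  // kPoisonHeapReferences enabled, a reference is typically poisoned just before being
  // stored into the heap and unpoisoned just after being loaded; registers below are
  // illustrative only:
  //
  //   assembler.MaybePoisonHeapReference(V0);
  //   assembler.StoreToOffset(kStoreWord, V0, A0, offset);
  //   ...
  //   assembler.LoadFromOffset(kLoadWord, V0, A0, offset);
  //   assembler.MaybeUnpoisonHeapReference(V0);
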
  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(Register reg) {
    if (kPoisonHeapReferences) {
      UnpoisonHeapReference(reg);
    }
  }

  void Bind(Label* label) OVERRIDE {
    Bind(down_cast<MipsLabel*>(label));
  }
  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
  }

  // Don't warn about a different virtual Bind/Jump in the base class.
  using JNIBase::Bind;
  using JNIBase::Jump;

  // Create a new label that can be used with Jump/Bind calls.
  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
    LOG(FATAL) << "Not implemented on MIPS32";
    UNREACHABLE();
  }
  // Emit an unconditional jump to the label.
  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
    LOG(FATAL) << "Not implemented on MIPS32";
    UNREACHABLE();
  }
  // Emit a conditional jump to the label by applying a unary condition test to the register.
  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
            JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
            ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
    LOG(FATAL) << "Not implemented on MIPS32";
    UNREACHABLE();
  }

  // Code at this offset will serve as the target for the Jump call.
  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
    LOG(FATAL) << "Not implemented on MIPS32";
    UNREACHABLE();
  }

  // Create a new literal with a given value.
  // NOTE: Force the template parameter to be explicitly specified.
  template <typename T>
  Literal* NewLiteral(typename Identity<T>::type value) {
    static_assert(std::is_integral<T>::value, "T must be an integral type.");
    return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value));
  }

  // Load label address using the base register (for R2 only) or using PC-relative loads
  // (for R6 only; base_reg must be ZERO). To be used with data labels in the literal /
  // jump table area only and not with regular code labels.
  void LoadLabelAddress(Register dest_reg, Register base_reg, MipsLabel* label);

  // Create a new literal with the given data.
  Literal* NewLiteral(size_t size, const uint8_t* data);

  // Load literal using the base register (for R2 only) or using PC-relative loads
  // (for R6 only; base_reg must be ZERO).
  void LoadLiteral(Register dest_reg, Register base_reg, Literal* literal);

  // Create a jump table for the given labels that will be emitted when finalizing.
  // When the table is emitted, offsets will be relative to the location of the table.
  // The table location is determined by the location of its label (the label precedes
  // the table data) and should be loaded using LoadLabelAddress().
  JumpTable* CreateJumpTable(std::vector<MipsLabel*>&& labels);
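
  // A usage sketch (not part of the original documentation; registers, labels and the
  // index register are illustrative only). A typical switch lowering builds the table,
  // loads its address, fetches the per-case offset and adds it to the table address:
  //
  //   std::vector<MipsLabel*> case_labels = ...;  // one label per switch case
  //   JumpTable* table = assembler.CreateJumpTable(std::move(case_labels));
  //   assembler.LoadLabelAddress(T9, base_reg, table->GetLabel());  // base_reg is ZERO on R6
  //   assembler.ShiftAndAdd(T8, index_reg, T9, 2);  // T8 = T9 + index * 4
  //   assembler.Lw(T8, T8, 0);                      // entry = offset relative to the table
  //   assembler.Addu(T9, T9, T8);                   // absolute address of the case target
  //   assembler.Jr(T9);
  //   assembler.NopIfNoReordering();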

  //
  // Overridden common assembler high-level functionality.
  //

  // Emit code that will create an activation on the stack.
  void BuildFrame(size_t frame_size,
                  ManagedRegister method_reg,
                  ArrayRef<const ManagedRegister> callee_save_regs,
                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;

  // Emit code that will remove an activation from the stack.
  void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
      OVERRIDE;

  void IncreaseFrameSize(size_t adjust) OVERRIDE;
  void DecreaseFrameSize(size_t adjust) OVERRIDE;

  // Store routines.
  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
  void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;

  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;

  void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                FrameOffset fr_offs,
                                ManagedRegister mscratch) OVERRIDE;

  void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;

  void StoreSpanning(FrameOffset dest,
                     ManagedRegister msrc,
                     FrameOffset in_off,
                     ManagedRegister mscratch) OVERRIDE;

  // Load routines.
  void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;

  void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;

  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

  void LoadRef(ManagedRegister mdest,
               ManagedRegister base,
               MemberOffset offs,
               bool unpoison_reference) OVERRIDE;

  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;

  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;

  // Copying routines.
  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;

  void CopyRawPtrFromThread(FrameOffset fr_offs,
                            ThreadOffset32 thr_offs,
                            ManagedRegister mscratch) OVERRIDE;

  void CopyRawPtrToThread(ThreadOffset32 thr_offs,
                          FrameOffset fr_offs,
                          ManagedRegister mscratch) OVERRIDE;

  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;

  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest,
            ManagedRegister src_base,
            Offset src_offset,
            ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void Copy(ManagedRegister dest_base,
            Offset dest_offset,
            FrameOffset src,
            ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void Copy(FrameOffset dest,
            FrameOffset src_base,
            Offset src_offset,
            ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void Copy(ManagedRegister dest,
            Offset dest_offset,
            ManagedRegister src,
            Offset src_offset,
            ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void Copy(FrameOffset dest,
            Offset dest_offset,
            FrameOffset src,
            Offset src_offset,
            ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void MemoryBarrier(ManagedRegister) OVERRIDE;

  // Sign extension.
  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Zero extension.
  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Exploit fast access in managed code to Thread::Current().
  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;

  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the handle scope entry to see if the value is
  // null.
  void CreateHandleScopeEntry(ManagedRegister out_reg,
                              FrameOffset handlescope_offset,
                              ManagedRegister in_reg,
                              bool null_allowed) OVERRIDE;

  // Set up out_off to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed.
  void CreateHandleScopeEntry(FrameOffset out_off,
                              FrameOffset handlescope_offset,
                              ManagedRegister mscratch,
                              bool null_allowed) OVERRIDE;

  // src holds a handle scope entry (Object**); load this into dst.
  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src may not be null.
  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;

  // Call to address held at [base+offset].
  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
  void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;

  // Emit slow paths queued during assembly and promote short branches to long if needed.
  void FinalizeCode() OVERRIDE;

  // Emit branches and finalize all instructions.
  void FinalizeInstructions(const MemoryRegion& region);

  // Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS,
  // must be used instead of MipsLabel::GetPosition()).
  uint32_t GetLabelLocation(const MipsLabel* label) const;

  // Get the final position of a label after local fixup based on the old position
  // recorded before FinalizeCode().
  uint32_t GetAdjustedPosition(uint32_t old_position);

  // R2 doesn't have PC-relative addressing, which we need to access literals. We simulate it by
  // reading the PC value into a general-purpose register with the NAL instruction and then loading
  // literals through this base register. The code generator calls this method (at most once per
  // method being compiled) to bind a label to the location for which the PC value is acquired.
  // The assembler then computes literal offsets relative to this label.
  void BindPcRelBaseLabel();

  // Returns the location of the label bound with BindPcRelBaseLabel().
  uint32_t GetPcRelBaseLabelLocation() const;
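
  // A sketch of the R2 pattern described above (not part of the original documentation;
  // the choice of base register is up to the code generator and is illustrative here):
  //
  //   assembler.Nal();                     // RA <- address of the instruction after the delay slot
  //   assembler.Move(V0, RA);              // placed in Nal()'s delay slot; V0 becomes the base
  //   assembler.BindPcRelBaseLabel();      // the base label now marks the address held in V0
  //   ...
  //   assembler.LoadLiteral(T0, V0, lit);  // R2 literal load relative to the base
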
  // Note that PC-relative literal loads are handled as pseudo branches because they need very
  // similar relocation and may similarly expand in size to accommodate larger offsets relative
  // to PC.
  enum BranchCondition {
    kCondLT,
    kCondGE,
    kCondLE,
    kCondGT,
    kCondLTZ,
    kCondGEZ,
    kCondLEZ,
    kCondGTZ,
    kCondEQ,
    kCondNE,
    kCondEQZ,
    kCondNEZ,
    kCondLTU,
    kCondGEU,
    kCondF,  // Floating-point predicate false.
    kCondT,  // Floating-point predicate true.
    kUncond,
  };
  friend std::ostream& operator<<(std::ostream& os, const BranchCondition& rhs);

  // Enables or disables instruction reordering (IOW, automatic filling of delay slots)
  // similarly to ".set reorder" / ".set noreorder" in traditional MIPS assembly.
  // Returns the previous state, which may be useful for temporarily enabling or
  // disabling reordering.
  bool SetReorder(bool enable);

 private:
  // Description of the last instruction in terms of input and output registers.
  // Used to make the decision of moving the instruction into a delay slot.
  struct DelaySlot {
    DelaySlot();
    // Encoded instruction that may be used to fill the delay slot or 0
    // (0 conveniently represents NOP).
    uint32_t instruction_;
    // Mask of output GPRs for the instruction.
    uint32_t gpr_outs_mask_;
    // Mask of input GPRs for the instruction.
    uint32_t gpr_ins_mask_;
    // Mask of output FPRs for the instruction.
    uint32_t fpr_outs_mask_;
    // Mask of input FPRs for the instruction.
    uint32_t fpr_ins_mask_;
    // Mask of output FPU condition code flags for the instruction.
    uint32_t cc_outs_mask_;
    // Mask of input FPU condition code flags for the instruction.
    uint32_t cc_ins_mask_;
    // Branches never operate on the LO and HI registers, hence there's
    // no mask for LO and HI.
  };

  // Delay slot finite state machine's (DS FSM's) state. The FSM state is updated
  // upon every new instruction and label generated. The FSM detects instructions
  // suitable for delay slots and immediately preceded by labels. These are target
  // instructions for branches. If an unconditional R2 branch does not get its delay
  // slot filled with the immediately preceding instruction, it may instead get the
  // slot filled with the target instruction (the branch will need its offset
  // incremented past the target instruction). We call this "absorption". The FSM
  // records PCs of the target instructions suitable for this optimization.
  enum DsFsmState {
    kExpectingLabel,
    kExpectingInstruction,
    kExpectingCommit
  };
  friend std::ostream& operator<<(std::ostream& os, const DsFsmState& rhs);

  class Branch {
   public:
    enum Type {
      // R2 short branches.
      kUncondBranch,
      kCondBranch,
      kCall,
      // R2 near label.
      kLabel,
      // R2 near literal.
      kLiteral,
      // R2 long branches.
      kLongUncondBranch,
      kLongCondBranch,
      kLongCall,
      // R2 far label.
      kFarLabel,
      // R2 far literal.
      kFarLiteral,
      // R6 short branches.
      kR6UncondBranch,
      kR6CondBranch,
      kR6Call,
      // R6 near label.
      kR6Label,
      // R6 near literal.
      kR6Literal,
      // R6 long branches.
      kR6LongUncondBranch,
      kR6LongCondBranch,
      kR6LongCall,
      // R6 far label.
      kR6FarLabel,
      // R6 far literal.
      kR6FarLiteral,
    };
    // Bit sizes of offsets defined as enums to minimize chance of typos.
    enum OffsetBits {
      kOffset16 = 16,
      kOffset18 = 18,
      kOffset21 = 21,
      kOffset23 = 23,
      kOffset28 = 28,
      kOffset32 = 32,
    };

    static constexpr uint32_t kUnresolved = 0xffffffff;  // Unresolved target_
    static constexpr int32_t kMaxBranchLength = 32;
    static constexpr int32_t kMaxBranchSize = kMaxBranchLength * sizeof(uint32_t);
    // The following two instruction encodings can never legally occur in branch delay
    // slots and are used as markers.
    //
    // kUnfilledDelaySlot means that the branch may use either the preceding or the target
    // instruction to fill its delay slot (the latter is only possible with unconditional
    // R2 branches and is termed here as "absorption").
    static constexpr uint32_t kUnfilledDelaySlot = 0x10000000;  // beq zero, zero, 0.
    // kUnfillableDelaySlot means that the branch cannot use an instruction (other than NOP)
    // to fill its delay slot. This is only used for unconditional R2 branches to prevent
    // absorption of the target instruction when reordering is disabled.
    static constexpr uint32_t kUnfillableDelaySlot = 0x13FF0000;  // beq ra, ra, 0.

    struct BranchInfo {
      // Branch length as a number of 4-byte-long instructions.
      uint32_t length;
      // Ordinal number (0-based) of the first (or the only) instruction that contains the branch's
      // PC-relative offset (or its most significant 16-bit half, which goes first).
      uint32_t instr_offset;
      // Different MIPS instructions with PC-relative offsets apply said offsets to slightly
      // different origins, e.g. to PC or PC+4. Encode the origin distance (as a number of 4-byte
      // instructions) from the instruction containing the offset.
      uint32_t pc_org;
      // How large (in bits) a PC-relative offset can be for a given type of branch (kR6CondBranch
      // is an exception: use kOffset23 for beqzc/bnezc).
      OffsetBits offset_size;
      // Some MIPS instructions with PC-relative offsets shift the offset by 2. Encode the shift
      // count.
      int offset_shift;
    };
    static const BranchInfo branch_info_[/* Type */];

    // Unconditional branch or call.
    Branch(bool is_r6, uint32_t location, uint32_t target, bool is_call);
    // Conditional branch.
    Branch(bool is_r6,
           uint32_t location,
           uint32_t target,
           BranchCondition condition,
           Register lhs_reg,
           Register rhs_reg);
    // Label address (in literal area) or literal.
    Branch(bool is_r6,
           uint32_t location,
           Register dest_reg,
           Register base_reg,
           Type label_or_literal_type);

    // Some conditional branches with lhs = rhs are effectively NOPs, while some
    // others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs.
    // So, we need a way to identify such branches in order to emit no instructions for them
    // or change them to unconditional.
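    // For instance (illustrative examples, not from the original comment):
    // `beq $t0, $t0, L` always branches and can be treated as an unconditional branch,
    // while `bne $t0, $t0, L` never branches and need not be emitted at all.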
    static bool IsNop(BranchCondition condition, Register lhs, Register rhs);
    static bool IsUncond(BranchCondition condition, Register lhs, Register rhs);

    static BranchCondition OppositeCondition(BranchCondition cond);

    Type GetType() const;
    BranchCondition GetCondition() const;
    Register GetLeftRegister() const;
    Register GetRightRegister() const;
    uint32_t GetTarget() const;
    uint32_t GetLocation() const;
    uint32_t GetOldLocation() const;
    uint32_t GetPrecedingInstructionLength(Type type) const;
    uint32_t GetPrecedingInstructionSize(Type type) const;
    uint32_t GetLength() const;
    uint32_t GetOldLength() const;
    uint32_t GetSize() const;
    uint32_t GetOldSize() const;
    uint32_t GetEndLocation() const;
    uint32_t GetOldEndLocation() const;
    bool IsLong() const;
    bool IsResolved() const;

    // Various helpers for branch delay slot management.
    bool CanHaveDelayedInstruction(const DelaySlot& delay_slot) const;
    void SetDelayedInstruction(uint32_t instruction);
    uint32_t GetDelayedInstruction() const;
    void DecrementLocations();

    // Returns the bit size of the signed offset that the branch instruction can handle.
    OffsetBits GetOffsetSize() const;

    // Calculates the distance between two byte locations in the assembler buffer and
    // returns the number of bits needed to represent the distance as a signed integer.
    //
    // Branch instructions have signed offsets of 16, 19 (addiupc), 21 (beqzc/bnezc),
    // and 26 (bc) bits, which are additionally shifted left 2 positions at run time.
    //
    // Composite branches (made of several instructions) with longer reach have 32-bit
    // offsets encoded as 2 16-bit "halves" in two instructions (high half goes first).
    // The composite branches cover the range of PC +/- 2GB on MIPS32 CPUs. However,
    // the range is not end-to-end on MIPS64 (unless addresses are forced to zero- or
    // sign-extend from 32 to 64 bits by the appropriate CPU configuration).
    // Consider the following implementation of a long unconditional branch, for
    // example:
    //
    //   auipc at, offset_31_16  // at = pc + sign_extend(offset_31_16) << 16
    //   jic   at, offset_15_0   // pc = at + sign_extend(offset_15_0)
    //
    // Both of the above instructions take 16-bit signed offsets as immediate operands.
    // When bit 15 of offset_15_0 is 1, it effectively causes subtraction of 0x10000
    // due to sign extension. This must be compensated for by incrementing offset_31_16
    // by 1. offset_31_16 can only be incremented by 1 if it's not 0x7FFF. If it is
    // 0x7FFF, adding 1 will overflow the positive offset into the negative range.
    // Therefore, the long branch range is something like from PC - 0x80000000 to
    // PC + 0x7FFF7FFF, IOW, shorter by 32KB on one side.
    //
    // The returned values are therefore: 18, 21, 23, 28 and 32. There's also a special
    // case with the addiu instruction and a 16-bit offset.
    static OffsetBits GetOffsetSizeNeeded(uint32_t location, uint32_t target);

    // Resolve a branch when the target is known.
    void Resolve(uint32_t target);

    // Relocate a branch by a given delta if needed due to expansion of this or another
    // branch at a given location by this delta (just changes location_ and target_).
    void Relocate(uint32_t expand_location, uint32_t delta);

    // If the branch is short, changes its type to long.
    void PromoteToLong();

    // If necessary, updates the type by promoting a short branch to a long branch
    // based on the branch location and target. Returns the amount (in bytes) by
    // which the branch size has increased.
    // max_short_distance caps the maximum distance between location_ and target_
    // that is allowed for short branches. This is for debugging/testing purposes.
    // max_short_distance = 0 forces all short branches to become long.
    // Use the implicit default argument when not debugging/testing.
    uint32_t PromoteIfNeeded(uint32_t location,
                             uint32_t max_short_distance = std::numeric_limits<uint32_t>::max());

    // Returns the location of the instruction(s) containing the offset.
    uint32_t GetOffsetLocation() const;

    // Calculates and returns the offset ready for encoding in the branch instruction(s).
    uint32_t GetOffset(uint32_t location) const;

   private:
    // Completes branch construction by determining and recording its type.
    void InitializeType(Type initial_type, bool is_r6);
    // Helper for the above.
    void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);

    uint32_t old_location_;  // Offset into assembler buffer in bytes.
    uint32_t location_;      // Offset into assembler buffer in bytes.
    uint32_t target_;        // Offset into assembler buffer in bytes.

    uint32_t lhs_reg_;           // Left-hand side register in conditional branches or
                                 // FPU condition code. Destination register in literals.
    uint32_t rhs_reg_;           // Right-hand side register in conditional branches.
                                 // Base register in literals (ZERO on R6).
    BranchCondition condition_;  // Condition for conditional branches.

    Type type_;      // Current type of the branch.
    Type old_type_;  // Initial type of the branch.

    uint32_t delayed_instruction_;  // Encoded instruction for the delay slot or
                                    // kUnfilledDelaySlot if none but fillable or
                                    // kUnfillableDelaySlot if none and unfillable
                                    // (the latter is only used for unconditional R2
                                    // branches).
  };
  friend std::ostream& operator<<(std::ostream& os, const Branch::Type& rhs);
  friend std::ostream& operator<<(std::ostream& os, const Branch::OffsetBits& rhs);

  uint32_t EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct);
  uint32_t EmitI(int opcode, Register rs, Register rt, uint16_t imm);
  uint32_t EmitI21(int opcode, Register rs, uint32_t imm21);
  uint32_t EmitI26(int opcode, uint32_t imm26);
  uint32_t EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct);
  uint32_t EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm);
  void EmitBcondR2(BranchCondition cond, Register rs, Register rt, uint16_t imm16);
  void EmitBcondR6(BranchCondition cond, Register rs, Register rt, uint32_t imm16_21);

  void Buncond(MipsLabel* label);
  void Bcond(MipsLabel* label, BranchCondition condition, Register lhs, Register rhs = ZERO);
  void Call(MipsLabel* label);
  void FinalizeLabeledBranch(MipsLabel* label);

  // Various helpers for branch delay slot management.
  void DsFsmInstr(uint32_t instruction,
                  uint32_t gpr_outs_mask,
                  uint32_t gpr_ins_mask,
                  uint32_t fpr_outs_mask,
                  uint32_t fpr_ins_mask,
                  uint32_t cc_outs_mask,
                  uint32_t cc_ins_mask);
  void DsFsmInstrNop(uint32_t instruction);
  void DsFsmInstrRrr(uint32_t instruction, Register out, Register in1, Register in2);
  void DsFsmInstrRrrr(uint32_t instruction, Register in1_out, Register in2, Register in3);
  void DsFsmInstrFff(uint32_t instruction, FRegister out, FRegister in1, FRegister in2);
  void DsFsmInstrFfff(uint32_t instruction, FRegister in1_out, FRegister in2, FRegister in3);
  void DsFsmInstrFffr(uint32_t instruction, FRegister in1_out, FRegister in2, Register in3);
  void DsFsmInstrRf(uint32_t instruction, Register out, FRegister in);
  void DsFsmInstrFr(uint32_t instruction, FRegister out, Register in);
  void DsFsmInstrFR(uint32_t instruction, FRegister in1, Register in2);
  void DsFsmInstrCff(uint32_t instruction, int cc_out, FRegister in1, FRegister in2);
  void DsFsmInstrRrrc(uint32_t instruction, Register in1_out, Register in2, int cc_in);
  void DsFsmInstrFffc(uint32_t instruction, FRegister in1_out, FRegister in2, int cc_in);
  void DsFsmLabel();
  void DsFsmCommitLabel();
  void DsFsmDropLabel();
  void MoveInstructionToDelaySlot(Branch& branch);
  bool CanExchangeWithSlt(Register rs, Register rt) const;
  void ExchangeWithSlt(const DelaySlot& forwarded_slot);
  void GenerateSltForCondBranch(bool unsigned_slt, Register rs, Register rt);

  Branch* GetBranch(uint32_t branch_id);
  const Branch* GetBranch(uint32_t branch_id) const;
  uint32_t GetBranchLocationOrPcRelBase(const MipsAssembler::Branch* branch) const;
  uint32_t GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Branch* branch) const;

  void EmitLiterals();
  void ReserveJumpTableSpace();
  void EmitJumpTables();
  void PromoteBranches();
  void EmitBranch(Branch* branch);
  void EmitBranches();
  void PatchCFI(size_t number_of_delayed_adjust_pcs);

  // Emits exception block.
  void EmitExceptionPoll(MipsExceptionSlowPath* exception);

  bool IsR6() const {
    if (isa_features_ != nullptr) {
      return isa_features_->IsR6();
    } else {
      return false;
    }
  }

  bool Is32BitFPU() const {
    if (isa_features_ != nullptr) {
      return isa_features_->Is32BitFloatingPoint();
    } else {
      return true;
    }
  }

  // List of exception blocks to generate at the end of the code cache.
  std::vector<MipsExceptionSlowPath> exception_blocks_;

  std::vector<Branch> branches_;

  // Whether appending instructions at the end of the buffer or overwriting the existing ones.
  bool overwriting_;
  // The current overwrite location.
  uint32_t overwrite_location_;

  // Whether instruction reordering (IOW, automatic filling of delay slots) is enabled.
  bool reordering_;
  // Information about the last instruction that may be used to fill a branch delay slot.
  DelaySlot delay_slot_;
  // Delay slot FSM state.
  DsFsmState ds_fsm_state_;
  // PC of the current labeled target instruction.
  uint32_t ds_fsm_target_pc_;
  // PCs of labeled target instructions.
  std::vector<uint32_t> ds_fsm_target_pcs_;

  // Use std::deque<> for literal labels to allow insertions at the end
  // without invalidating pointers and references to existing elements.
  ArenaDeque<Literal> literals_;

  // Jump table list.
  ArenaDeque<JumpTable> jump_tables_;

  // There's no PC-relative addressing on MIPS32R2. So, in order to access literals relative to PC
  // we get PC using the NAL instruction. This label marks the position within the assembler buffer
  // that PC (from NAL) points to.
  MipsLabel pc_rel_base_label_;

  // Data for GetAdjustedPosition(), see the description there.
  uint32_t last_position_adjustment_;
  uint32_t last_old_position_;
  uint32_t last_branch_id_;

  const MipsInstructionSetFeatures* isa_features_;

  DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
};

}  // namespace mips
}  // namespace art

#endif  // ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_