//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "dyld"

namespace llvm {

class RuntimeDyldMachOAArch64
    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
public:

  typedef uint64_t TargetPtrT;

  RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
                          RuntimeDyld::SymbolResolver &Resolver)
      : RuntimeDyldMachOCRTPBase(MM, Resolver) {}

  unsigned getMaxStubSize() override { return 8; }

  unsigned getStubAlignment() override { return 8; }

  /// Extract the addend encoded in the instruction / memory location.
  int64_t decodeAddend(const RelocationEntry &RE) const {
    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    unsigned NumBytes = 1 << RE.Size;
    int64_t Addend = 0;
    // Verify that the relocation has the correct size and alignment.
    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
      else
        Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      // Verify that the relocation points to the expected branch instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Get the 26 bit addend encoded in the branch instruction and sign-extend
      // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
      // (<< 2).
      Addend = (*p & 0x03FFFFFF) << 2;
      Addend = SignExtend64(Addend, 28);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Get the 21 bit addend encoded in the adrp instruction and sign-extend
      // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
      // therefore implicit (<< 12).
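      // Worked example (illustrative, not from the original source): for
      // "adrp x16, #0x8000" the instruction word is 0x90000050; immlo
      // (bits 30:29) is 0 and immhi (bits 23:5) is 2, so the reassembled
      // 21-bit immediate is 8, which shifted left by 12 yields the addend
      // 0x8000.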
      Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
      Addend = SignExtend64(Addend << 12, 33);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      (void)p;
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Get the 12 bit addend encoded in the instruction.
      Addend = (*p & 0x003FFC00) >> 10;

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        if (ImplicitShift == 0) {
          // Check if this a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000)
            ImplicitShift = 4;
        }
      }
      // Compensate for implicit shift.
      Addend <<= ImplicitShift;
      break;
    }
    }
    return Addend;
  }

  /// Encode the addend in the instruction / memory location.
  void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
                    MachO::RelocationInfoType RelType, int64_t Addend) const {
    // Verify that the relocation has the correct alignment.
    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
      else
        *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      // Verify that the relocation points to the expected branch instruction.
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Verify addend value.
      assert((Addend & 0x3) == 0 && "Branch target is not aligned.");
      assert(isInt<28>(Addend) && "Branch target is out of range.");

      // Encode the addend as 26 bit immediate in the branch instruction.
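      // Illustrative example (not from the original source): an addend of +8,
      // i.e. two instructions forward, becomes imm26 = 8 >> 2 = 2, so a plain
      // branch (0x14000000) is rewritten to 0x14000002.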
173 *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF); 174 break; 175 } 176 case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: 177 case MachO::ARM64_RELOC_PAGE21: { 178 // Verify that the relocation points to the expected adrp instruction. 179 auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress); 180 assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction."); 181 182 // Check that the addend fits into 21 bits (+ 12 lower bits). 183 assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned."); 184 assert(isInt<33>(Addend) && "Invalid page reloc value."); 185 186 // Encode the addend into the instruction. 187 uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000; 188 uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0; 189 *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue; 190 break; 191 } 192 case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: { 193 // Verify that the relocation points to one of the expected load / store 194 // instructions. 195 auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress); 196 assert((*p & 0x3B000000) == 0x39000000 && 197 "Only expected load / store instructions."); 198 (void)p; 199 } // fall-through 200 case MachO::ARM64_RELOC_PAGEOFF12: { 201 // Verify that the relocation points to one of the expected load / store 202 // or add / sub instructions. 203 auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress); 204 assert((((*p & 0x3B000000) == 0x39000000) || 205 ((*p & 0x11C00000) == 0x11000000) ) && 206 "Expected load / store or add/sub instruction."); 207 208 // Check which instruction we are decoding to obtain the implicit shift 209 // factor of the instruction and verify alignment. 210 int ImplicitShift = 0; 211 if ((*p & 0x3B000000) == 0x39000000) { // << load / store 212 // For load / store instructions the size is encoded in bits 31:30. 213 ImplicitShift = ((*p >> 30) & 0x3); 214 switch (ImplicitShift) { 215 case 0: 216 // Check if this a vector op to get the correct shift value. 217 if ((*p & 0x04800000) == 0x04800000) { 218 ImplicitShift = 4; 219 assert(((Addend & 0xF) == 0) && 220 "128-bit LDR/STR not 16-byte aligned."); 221 } 222 break; 223 case 1: 224 assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned."); 225 break; 226 case 2: 227 assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned."); 228 break; 229 case 3: 230 assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned."); 231 break; 232 } 233 } 234 // Compensate for implicit shift. 235 Addend >>= ImplicitShift; 236 assert(isUInt<12>(Addend) && "Addend cannot be encoded."); 237 238 // Encode the addend into the instruction. 239 *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00); 240 break; 241 } 242 } 243 } 244 245 relocation_iterator processRelocationRef(unsigned SectionID,relocation_iterator RelI,const ObjectFile & BaseObjT,ObjSectionToIDMap & ObjSectionToID,StubMap & Stubs)246 processRelocationRef(unsigned SectionID, relocation_iterator RelI, 247 const ObjectFile &BaseObjT, 248 ObjSectionToIDMap &ObjSectionToID, 249 StubMap &Stubs) override { 250 const MachOObjectFile &Obj = 251 static_cast<const MachOObjectFile &>(BaseObjT); 252 MachO::any_relocation_info RelInfo = 253 Obj.getRelocation(RelI->getRawDataRefImpl()); 254 255 assert(!Obj.isRelocationScattered(RelInfo) && ""); 256 257 // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit 258 // addend for the following relocation. 
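    // For example (illustrative, not from the original source): "b _foo+8"
    // is typically emitted as an ARM64_RELOC_ADDEND carrying the value 8,
    // immediately followed by an ARM64_RELOC_BRANCH26 against _foo.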
    int64_t ExplicitAddend = 0;
    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
      assert(!Obj.getPlainRelocationExternal(RelInfo));
      assert(!Obj.getAnyRelocationPCRel(RelInfo));
      assert(Obj.getAnyRelocationLength(RelInfo) == 2);
      int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit addend to 64 bits.
      ExplicitAddend = SignExtend64(RawAddend, 24);
      ++RelI;
      RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
    }

    RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
    RE.Addend = decodeAddend(RE);
    RelocationValueRef Value(
        getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));

    assert((ExplicitAddend == 0 || RE.Addend == 0) &&
           "Relocation has ARM64_RELOC_ADDEND and embedded addend in the "
           "instruction.");
    if (ExplicitAddend) {
      RE.Addend = ExplicitAddend;
      Value.Offset = ExplicitAddend;
    }

    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
    if (!IsExtern && RE.IsPCRel)
      makeValueAddendPCRel(Value, Obj, RelI, 1 << RE.Size);

    RE.Addend = Value.Offset;

    if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
        RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
      processGOTRelocation(RE, Value, Stubs);
    else {
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }

    return ++RelI;
  }

  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
    DEBUG(dumpRelocationToResolve(RE, Value));

    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    MachO::RelocationInfoType RelType =
        static_cast<MachO::RelocationInfoType>(RE.RelType);

    switch (RelType) {
    default:
      llvm_unreachable("Invalid relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
      // Mask in the target value a byte at a time (we don't have an alignment
      // guarantee for the target address, so this is safest).
      if (RE.Size < 2)
        llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");

      encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
      break;
    }
    case MachO::ARM64_RELOC_BRANCH26: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
      // Check if branch is in range.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      int64_t PCRelVal = Value - FinalAddress + RE.Addend;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
      // Adjust for PC-relative relocation and offset.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      int64_t PCRelVal =
          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
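    // Illustrative example (not from the original source): if Value + Addend
    // is 0x10234 and FinalAddress is 0x4008, the PAGE21 case above encodes
    // the page delta 0x10000 - 0x4000 = 0xC000 into the adrp; the PAGEOFF12
    // case below then supplies the remaining low bits, 0x234.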
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
    case MachO::ARM64_RELOC_PAGEOFF12: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF12 not supported");
      // Add the offset from the symbol.
      Value += RE.Addend;
      // Mask out the page address and only use the lower 12 bits.
      Value &= 0xFFF;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
      break;
    }
    case MachO::ARM64_RELOC_SUBTRACTOR:
    case MachO::ARM64_RELOC_POINTER_TO_GOT:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
      llvm_unreachable("Relocation type not yet implemented!");
    case MachO::ARM64_RELOC_ADDEND:
      llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
                       "processRelocationRef!");
    }
  }

  void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
                       const SectionRef &Section) {}

private:
  void processGOTRelocation(const RelocationEntry &RE,
                            RelocationValueRef &Value, StubMap &Stubs) {
    assert(RE.Size == 2 && "Invalid relocation size.");
    SectionEntry &Section = Sections[RE.SectionID];
    StubMap::const_iterator i = Stubs.find(Value);
    int64_t Offset;
    if (i != Stubs.end())
      Offset = static_cast<int64_t>(i->second);
    else {
      // FIXME: There must be a better way to do this than to check and fix the
      // alignment every time!!!
      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      uintptr_t StubAddress =
          (BaseAddress + Section.StubOffset + StubAlignment - 1) &
          -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;
      Stubs[Value] = StubOffset;
      assert(((StubAddress % getStubAlignment()) == 0) &&
             "GOT entry not aligned");
      RelocationEntry GOTRE(RE.SectionID, StubOffset,
                            MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
                            /*IsPCRel=*/false, /*Size=*/3);
      if (Value.SymbolName)
        addRelocationForSymbol(GOTRE, Value.SymbolName);
      else
        addRelocationForSection(GOTRE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
      Offset = static_cast<int64_t>(StubOffset);
    }
    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
                             RE.IsPCRel, RE.Size);
    addRelocationForSection(TargetRE, RE.SectionID);
  }
};
} // end namespace llvm

#undef DEBUG_TYPE

#endif