1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21
22 using namespace clang;
23 using namespace clang::targets;
24
// Table describing every builtin this target knows about, assembled from
// the NEON, SVE and AArch64-specific .def files. The concatenation order
// here must mirror the builtin ID numbering (NEON, then SVE, then the
// remaining AArch64 builtins), since getTargetBuiltins() hands this array
// out as a flat, index-addressed slice.
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
// Plain builtins: no header requirement, available in all languages.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

// The AArch64 .def additionally distinguishes language-restricted builtins
// and builtins that are only exposed through a specific header/feature.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42
// Construct the generic AArch64 target description. Everything set here is
// common to all AArch64 targets; OS-specific subclasses (Darwin, Windows,
// big-endian, ...) override individual properties in their own constructors.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    // OpenBSD defines int64_t/intmax_t in terms of long long.
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    // Darwin and NetBSD keep the default (signed) wchar_t; all other
    // targets use unsigned int.
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // ILP32 variants (e.g. arm64_32) get 32-bit long/pointer.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE binary128 under AAPCS64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available. Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit. It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling entry-point name: Linux uses the assembler-escaped "\01_mcount";
  // bare-metal picks by EABI flavour.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
109
getABI() const110 StringRef AArch64TargetInfo::getABI() const { return ABI; }
111
setABI(const std::string & Name)112 bool AArch64TargetInfo::setABI(const std::string &Name) {
113 if (Name != "aapcs" && Name != "darwinpcs")
114 return false;
115
116 ABI = Name;
117 return true;
118 }
119
validateBranchProtection(StringRef Spec,BranchProtectionInfo & BPI,StringRef & Err) const120 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
121 BranchProtectionInfo &BPI,
122 StringRef &Err) const {
123 llvm::AArch64::ParsedBranchProtection PBP;
124 if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
125 return false;
126
127 BPI.SignReturnAddr =
128 llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
129 .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
130 .Case("all", LangOptions::SignReturnAddressScopeKind::All)
131 .Default(LangOptions::SignReturnAddressScopeKind::None);
132
133 if (PBP.Key == "a_key")
134 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
135 else
136 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
137
138 BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
139 return true;
140 }
141
isValidCPUName(StringRef Name) const142 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
143 return Name == "generic" ||
144 llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
145 }
146
// Accept a -mcpu selection. No per-CPU state is recorded here; the CPU's
// implied features arrive separately via handleTargetFeatures().
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
150
// Append every CPU name the AArch64 target parser knows to Values (used for
// diagnostics such as the "did you mean" list on an invalid -mcpu).
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
155
// Predefine the ACLE feature macros implied by targeting Armv8.1-A.
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
  Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
  Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}
162
// Predefine the macros implied by Armv8.2-A. v8.2 adds no unconditional
// macros of its own here; it only inherits the v8.1 set.
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
168
// Predefine the macros implied by Armv8.3-A (complex-number intrinsics and
// the javascript-convert instruction), plus everything from v8.2.
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}
176
// Predefine the macros implied by Armv8.4-A (nothing new unconditionally;
// inherits the v8.3 set).
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
182
// Predefine the macros implied by Armv8.5-A (nothing new unconditionally;
// inherits the v8.4 set).
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
188
// Predefine the macros implied by Armv8.6-A; currently just inherits the
// v8.5 set (see FIXME below for the extensions v8.6 makes mandatory).
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
198
// Emit all predefined preprocessor macros for AArch64: target
// identification, data-model macros, the ACLE feature macros derived from
// the feature flags computed in handleTargetFeatures(), and the
// architecture-version macro sets.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  // __AARCH64_CMODEL_<MODEL>__, with "default" normalised to "small".
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", "8");
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  // 0xF: exclusive accesses available at byte, halfword, word and dword.
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");  // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  // SVE / SVE2 feature macros; the SVE2 sub-extensions are only advertised
  // together with SVE2 itself.
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  if (HasCrypto)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  // FP16 vector arithmetic additionally requires NEON.
  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  // Architecture-version macro sets; each level pulls in its predecessors.
  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // -msve-vector-bits pins the SVE length and enables fixed-length vector
  // operators on sizeless types.
  if (Opts.ArmSveVectorBits) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.ArmSveVectorBits));
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
  }
}
387
// Expose the static builtin table; its length is the count of this target's
// builtin IDs (LastTSBuiltin minus the shared FirstTSBuiltin base).
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}
392
hasFeature(StringRef Feature) const393 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
394 return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
395 (Feature == "neon" && (FPU & NeonMode)) ||
396 ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
397 Feature == "sve2-aes" || Feature == "sve2-sha3" ||
398 Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
399 Feature == "i8mm" || Feature == "bf16") &&
400 (FPU & SveMode));
401 }
402
handleTargetFeatures(std::vector<std::string> & Features,DiagnosticsEngine & Diags)403 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
404 DiagnosticsEngine &Diags) {
405 FPU = FPUMode;
406 HasCRC = false;
407 HasCrypto = false;
408 HasUnaligned = true;
409 HasFullFP16 = false;
410 HasDotProd = false;
411 HasFP16FML = false;
412 HasMTE = false;
413 HasTME = false;
414 HasMatMul = false;
415 HasBFloat16 = false;
416 HasSVE2 = false;
417 HasSVE2AES = false;
418 HasSVE2SHA3 = false;
419 HasSVE2SM4 = false;
420 HasSVE2BitPerm = false;
421 HasMatmulFP64 = false;
422 HasMatmulFP32 = false;
423 HasLSE = false;
424
425 ArchKind = llvm::AArch64::ArchKind::ARMV8A;
426
427 for (const auto &Feature : Features) {
428 if (Feature == "+neon")
429 FPU |= NeonMode;
430 if (Feature == "+sve") {
431 FPU |= SveMode;
432 HasFullFP16 = 1;
433 }
434 if (Feature == "+sve2") {
435 FPU |= SveMode;
436 HasFullFP16 = 1;
437 HasSVE2 = 1;
438 }
439 if (Feature == "+sve2-aes") {
440 FPU |= SveMode;
441 HasFullFP16 = 1;
442 HasSVE2 = 1;
443 HasSVE2AES = 1;
444 }
445 if (Feature == "+sve2-sha3") {
446 FPU |= SveMode;
447 HasFullFP16 = 1;
448 HasSVE2 = 1;
449 HasSVE2SHA3 = 1;
450 }
451 if (Feature == "+sve2-sm4") {
452 FPU |= SveMode;
453 HasFullFP16 = 1;
454 HasSVE2 = 1;
455 HasSVE2SM4 = 1;
456 }
457 if (Feature == "+sve2-bitperm") {
458 FPU |= SveMode;
459 HasFullFP16 = 1;
460 HasSVE2 = 1;
461 HasSVE2BitPerm = 1;
462 }
463 if (Feature == "+f32mm") {
464 FPU |= SveMode;
465 HasMatmulFP32 = true;
466 }
467 if (Feature == "+f64mm") {
468 FPU |= SveMode;
469 HasMatmulFP64 = true;
470 }
471 if (Feature == "+crc")
472 HasCRC = true;
473 if (Feature == "+crypto")
474 HasCrypto = true;
475 if (Feature == "+strict-align")
476 HasUnaligned = false;
477 if (Feature == "+v8.1a")
478 ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
479 if (Feature == "+v8.2a")
480 ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
481 if (Feature == "+v8.3a")
482 ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
483 if (Feature == "+v8.4a")
484 ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
485 if (Feature == "+v8.5a")
486 ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
487 if (Feature == "+v8.6a")
488 ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
489 if (Feature == "+v8r")
490 ArchKind = llvm::AArch64::ArchKind::ARMV8R;
491 if (Feature == "+fullfp16")
492 HasFullFP16 = true;
493 if (Feature == "+dotprod")
494 HasDotProd = true;
495 if (Feature == "+fp16fml")
496 HasFP16FML = true;
497 if (Feature == "+mte")
498 HasMTE = true;
499 if (Feature == "+tme")
500 HasTME = true;
501 if (Feature == "+i8mm")
502 HasMatMul = true;
503 if (Feature == "+bf16")
504 HasBFloat16 = true;
505 if (Feature == "+lse")
506 HasLSE = true;
507 }
508
509 setDataLayout();
510
511 return true;
512 }
513
514 TargetInfo::CallingConvCheckResult
checkCallingConvention(CallingConv CC) const515 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
516 switch (CC) {
517 case CC_C:
518 case CC_Swift:
519 case CC_PreserveMost:
520 case CC_PreserveAll:
521 case CC_OpenCLKernel:
522 case CC_AArch64VectorCall:
523 case CC_Win64:
524 return CCCR_OK;
525 default:
526 return CCCR_Warning;
527 }
528 }
529
isCLZForZeroUndef() const530 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
531
// The generic AArch64 ABI uses the AAPCS64 structured va_list; Darwin and
// Windows subclasses override this with a plain char* va_list.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
535
// Register names accepted in GCC-style inline assembly constraints and
// register variables.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0",  "w1",  "w2",  "w3",  "w4",  "w5",  "w6",  "w7",  "w8",  "w9", "w10",
    "w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20",
    "w21", "w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30",
    "wsp",

    // 64-bit Integer registers
    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",  "x8",  "x9", "x10",
    "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20",
    "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp",  "lr", "sp",

    // 32-bit floating point registers
    "s0",  "s1",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",  "s8",  "s9", "s10",
    "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20",
    "s21", "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30",
    "s31",

    // 64-bit floating point registers
    "d0",  "d1",  "d2",  "d3",  "d4",  "d5",  "d6",  "d7",  "d8",  "d9", "d10",
    "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20",
    "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30",
    "d31",

    // Neon vector registers
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",  "v8",  "v9", "v10",
    "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30",
    "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
571
// Expose the static register-name table to the inline-assembly machinery.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}
575
// Alternative spellings accepted for registers in inline assembly. Each
// entry maps one or more alias names onto the canonical name in GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
614
// Expose the static register-alias table to the inline-assembly machinery.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}
618
// Validate a single GCC inline-assembly constraint letter for this target,
// recording in Info whether it names a register or a memory operand. Name
// is advanced past any extra characters consumed (only the multi-letter
// "Up[la]" constraint does so here). Returns false for anything
// unrecognised.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2; // consume the two extra letters
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
  // Unreachable: every switch path returns; kept to satisfy compilers that
  // warn about falling off the end of a non-void function.
  return false;
}
669
// Check that an operand modifier is sensible for the given constraint and
// operand size. For 'r'/'z' constraints, a sub-64-bit operand without an
// explicit 'x'/'w' modifier would silently use the 64-bit 'x' register
// form, so suggest the 'w' modifier instead and reject.
bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
700
getClobbers() const701 const char *AArch64TargetInfo::getClobbers() const { return ""; }
702
getEHDataRegisterNumber(unsigned RegNo) const703 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
704 if (RegNo == 0)
705 return 0;
706 if (RegNo == 1)
707 return 1;
708 return -1;
709 }
710
hasInt128Type() const711 bool AArch64TargetInfo::hasInt128Type() const { return true; }
712
// Little-endian AArch64: everything comes from the generic base; the
// endianness difference only shows up in setDataLayout()/getTargetDefines().
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
716
setDataLayout()717 void AArch64leTargetInfo::setDataLayout() {
718 if (getTriple().isOSBinFormatMachO()) {
719 if(getTriple().isArch32Bit())
720 resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128");
721 else
722 resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128");
723 } else
724 resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
725 }
726
// Add the little-endian identification macro, then the common AArch64 set.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
732
// Big-endian AArch64: everything comes from the generic base; the
// endianness difference only shows up in setDataLayout()/getTargetDefines().
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
736
// Add the big-endian identification macros, then the common AArch64 set.
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
744
// Big-endian data layout ("E" prefix). There is no big-endian Mach-O
// AArch64, hence the assertion.
void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
749
// Shared base for ARM64 Windows targets (MSVC and MinGW environments).
// Overrides the base type model with Windows' LLP64 conventions.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // Unlike AAPCS64's IEEE quad, Windows long double is plain IEEE double.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
767
setDataLayout()768 void WindowsARM64TargetInfo::setDataLayout() {
769 resetDataLayout(Triple.isOSBinFormatMachO()
770 ? "e-m:o-i64:64-i128:128-n32:64-S128"
771 : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128");
772 }
773
// Windows ARM64 uses a plain char* va_list rather than the AAPCS64
// structured one.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
778
// Classify a calling convention on Windows ARM64. The x86-specific
// conventions are silently ignored (as MSVC does for non-x86 targets);
// supported conventions are OK; anything else warns.
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
798
// ARM64 Windows with the MSVC environment: same type model as the shared
// Windows base, but with the Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
804
// Add the MSVC architecture macro on top of the common Windows/AArch64 set.
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}
810
// MSVC-environment ARM64 always follows the Microsoft Win64 calling
// convention rules; the Clang 4 compatibility flag is irrelevant here.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
815
// Minimum alignment (in bits) for a global of the given size (in bits),
// matching MSVC's size-based alignment so our objects link cleanly with
// MSVC-compiled ones.
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
832
// ARM64 Windows with the GNU (MinGW) environment: same type model as the
// shared Windows base, but with the generic AArch64 (Itanium-family) C++ ABI
// instead of the Microsoft one.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
838
// Darwin (macOS/iOS/watchOS) AArch64 target. Overrides the AAPCS64 defaults
// where Darwin's ABI differs: long long 64-bit types, signed wchar_t,
// double-sized long double, and its own bitfield and C++ ABI rules.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is plain IEEE double on Darwin, not IEEE quad.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // 32-bit (arm64_32) follows the watchOS bitfield rules and C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
862
// Emit the Darwin-specific predefined macros (arm64 identification plus the
// common Darwin platform/version macros).
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  // Distinguish arm64_32 (ILP32) from regular 64-bit arm64.
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
882
// Darwin ARM64 uses a plain char* va_list rather than the AAPCS64
// structured one.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
887
// 64-bit RenderScript is aarch64
// The incoming triple's architecture is rewritten to plain "aarch64" while
// keeping the vendor/OS/environment components.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
897
// Add the RenderScript identification macro on top of the little-endian
// AArch64 set.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
903