xref: /freebsd/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision cfd6422a5217410fbd66f7a7a8a64d9d85e61229)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21 
22 using namespace clang;
23 using namespace clang::targets;
24 
// Table describing every AArch64 target builtin: NEON, SVE, and the core
// AArch64 builtins, in that order, produced by expanding the BUILTIN-family
// macros over the corresponding .def files.  Entries record the builtin's
// name, type signature, attributes, required header, language mask, and
// feature string (nullptr where not applicable).
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42 
/// Construct the base AArch64 target description.  Sets the type widths,
/// alignments, ABI defaults, and feature flags common to all AArch64
/// subtargets; OS- and endianness-specific subclasses refine these further.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD defines int64_t/intmax_t in terms of "long long"; the other
  // targets handled here use "long".  Most non-Darwin, non-NetBSD targets
  // also make wchar_t unsigned.
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // 32-bit subtargets (e.g. arm64_32/ILP32 — confirm against the triples
  // routed here) use 32-bit long and pointer types.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE binary128 (quad precision) on AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling entry-point symbol name varies by OS/EABI.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
109 
110 StringRef AArch64TargetInfo::getABI() const { return ABI; }
111 
112 bool AArch64TargetInfo::setABI(const std::string &Name) {
113   if (Name != "aapcs" && Name != "darwinpcs")
114     return false;
115 
116   ABI = Name;
117   return true;
118 }
119 
/// Parse a -mbranch-protection specification string and translate it into
/// the language-option form used by codegen.
///
/// \param Spec the raw option text (e.g. "standard", "pac-ret+leaf+b-key").
/// \param BPI  [out] filled with the return-address-signing scope/key and
///             the branch-target-enforcement flag.
/// \param Err  [out] set by the LLVM parser on malformed input.
/// \returns false if the spec does not parse; BPI is only valid on success.
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
                                                 BranchProtectionInfo &BPI,
                                                 StringRef &Err) const {
  llvm::AArch64::ParsedBranchProtection PBP;
  if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
    return false;

  // Unrecognized scopes fall back to None (no return-address signing).
  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  // Anything other than an explicit "a_key" selects the B key.
  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  return true;
}
141 
142 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
143   return Name == "generic" ||
144          llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
145 }
146 
/// Accept a -mcpu value.  Validation only; this target records no extra
/// per-CPU state here.
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
150 
/// Populate \p Values with every CPU name the LLVM AArch64 target parser
/// accepts (used for diagnostics such as -mcpu=help suggestions).
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
155 
/// Emit macros specific to Armv8.1-A (base of the cumulative v8.x chain).
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // FIXME: Armv8.1 makes __ARM_FEATURE_CRC32 mandatory. Handle it here.
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}
161 
/// Emit macros specific to Armv8.2-A (currently none beyond v8.1).
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
167 
/// Emit macros specific to Armv8.3-A: complex-number and
/// javascript-conversion (JCVT) intrinsics.
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}
175 
/// Emit macros specific to Armv8.4-A (currently none beyond v8.3).
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  // FIXME: Armv8.4 makes __ARM_FEATURE_ATOMICS, defined in GCC, mandatory.
  // Add and handle it here.
  getTargetDefinesARMV83A(Opts, Builder);
}
183 
/// Emit macros specific to Armv8.5-A (currently none beyond v8.4).
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
189 
/// Emit macros specific to Armv8.6-A (currently none beyond v8.5).
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
199 
/// Define the preprocessor macros for AArch64: target identification,
/// ACLE feature macros, and feature-dependent macros derived from the
/// state computed in handleTargetFeatures().  Endianness/OS-specific
/// macros are added by subclasses before delegating here.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  // Expose the code model as __AARCH64_CMODEL_<NAME>__ ("default" maps to
  // "small").
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", "8");
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  // SVE/SVE2 feature macros; the SVE2 sub-extension macros additionally
  // require the base SVE2 feature.
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  if (HasCrypto)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  // Architecture-revision macros; each getTargetDefinesARMV8xA chains down
  // to the revisions below it.
  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
380 
/// Return the target-specific builtin table (BuiltinInfo above), sized by
/// the AArch64 builtin ID range.
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}
385 
386 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
387   return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
388          (Feature == "neon" && (FPU & NeonMode)) ||
389          ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
390            Feature == "sve2-aes" || Feature == "sve2-sha3" ||
391            Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
392            Feature == "i8mm" || Feature == "bf16") &&
393           (FPU & SveMode));
394 }
395 
/// Apply the driver's resolved "+feature" strings.  All feature-dependent
/// state is first reset to the ARMv8-A baseline (no optional extensions)
/// and then rebuilt from \p Features, so this is safe to re-run.  Finishes
/// by (re)computing the data layout.  Always returns true (no diagnostics
/// are emitted here).
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  FPU = FPUMode;
  HasCRC = false;
  HasCrypto = false;
  HasUnaligned = true;
  HasFullFP16 = false;
  HasDotProd = false;
  HasFP16FML = false;
  HasMTE = false;
  HasTME = false;
  HasMatMul = false;
  HasBFloat16 = false;
  HasSVE2 = false;
  HasSVE2AES = false;
  HasSVE2SHA3 = false;
  HasSVE2SM4 = false;
  HasSVE2BitPerm = false;
  HasMatmulFP64 = false;
  HasMatmulFP32 = false;

  ArchKind = llvm::AArch64::ArchKind::ARMV8A;

  for (const auto &Feature : Features) {
    if (Feature == "+neon")
      FPU |= NeonMode;
    // Every SVE2 sub-extension implies SVE2, SVE, and full FP16 support.
    if (Feature == "+sve") {
      FPU |= SveMode;
      HasFullFP16 = 1;
    }
    if (Feature == "+sve2") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
    }
    if (Feature == "+sve2-aes") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2AES = 1;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2SHA3 = 1;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2SM4 = 1;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2BitPerm = 1;
    }
    // The FP matrix-multiply extensions imply SVE.
    if (Feature == "+f32mm") {
      FPU |= SveMode;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= SveMode;
      HasMatmulFP64 = true;
    }
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+crypto")
      HasCrypto = true;
    // "+strict-align" forbids, rather than enables, unaligned accesses.
    if (Feature == "+strict-align")
      HasUnaligned = false;
    if (Feature == "+v8.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
    if (Feature == "+v8.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
    if (Feature == "+v8.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
    if (Feature == "+v8.4a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
    if (Feature == "+v8.5a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
    if (Feature == "+v8.6a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
    if (Feature == "+fullfp16")
      HasFullFP16 = true;
    if (Feature == "+dotprod")
      HasDotProd = true;
    if (Feature == "+fp16fml")
      HasFP16FML = true;
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
  }

  setDataLayout();

  return true;
}
501 
/// Report which calling conventions are usable on AArch64.  Anything not
/// explicitly listed gets a warning (and falls back to the default CC).
TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
517 
518 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
519 
/// The generic AArch64 target uses the AAPCS64 structured va_list; Darwin
/// and Windows subclasses override this with a char-pointer va_list.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
523 
// Register names accepted in GCC-style inline assembly and register
// variables, grouped by register class.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
559 
/// Expose the GCCRegNames table above to the generic TargetInfo machinery.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}
563 
// Alternate spellings accepted for registers in inline assembly: w31/x31
// for the stack pointer, and GCC's rN naming for the xN registers.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
602 
/// Expose the GCCRegAliases table above to the generic TargetInfo machinery.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}
606 
/// Validate a single GCC inline-asm constraint letter for AArch64 and
/// record its properties (register vs. memory) in \p Info.
///
/// \param Name [in/out] points at the constraint letter; for multi-letter
///             constraints ("Upa"/"Upl") it is advanced past the extra
///             characters on success.
/// \returns true if the constraint is recognized.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
  // Unreachable: every switch case returns above.
  return false;
}
657 
/// Check an inline-asm operand modifier against its constraint.  For the
/// 'r'/'z' (integer register) constraints, a 64-bit operand defaults to the
/// x-registers; smaller operands should use the "w" modifier, which is
/// suggested via \p SuggestedModifier when validation fails.
bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
688 
689 const char *AArch64TargetInfo::getClobbers() const { return ""; }
690 
691 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
692   if (RegNo == 0)
693     return 0;
694   if (RegNo == 1)
695     return 1;
696   return -1;
697 }
698 
699 bool AArch64TargetInfo::hasInt128Type() const { return true; }
700 
/// Little-endian AArch64: all configuration lives in the base class; the
/// endianness shows up in setDataLayout() and getTargetDefines().
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
704 
705 void AArch64leTargetInfo::setDataLayout() {
706   if (getTriple().isOSBinFormatMachO()) {
707     if(getTriple().isArch32Bit())
708       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128");
709     else
710       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128");
711   } else
712     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
713 }
714 
/// Add the little-endian identification macro, then the common AArch64 set.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
720 
/// Big-endian AArch64: all configuration lives in the base class; the
/// endianness shows up in setDataLayout() and getTargetDefines().
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
724 
/// Add the big-endian identification macros, then the common AArch64 set.
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
732 
/// Big-endian data layout; Mach-O has no big-endian AArch64 variant here.
void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
737 
/// Common Windows-on-ARM64 configuration, shared by the MSVC and MinGW
/// flavors: LLP64 type model and double-precision long double.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
755 
/// Windows uses the COFF mangling scheme ("m:w") in its data layout.
void WindowsARM64TargetInfo::setDataLayout() {
  resetDataLayout("e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128");
}
759 
/// Windows ARM64 uses a plain char* va_list rather than the AAPCS64 struct.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
764 
/// Calling-convention policy on Windows ARM64: the x86-specific conventions
/// are silently ignored (for source compatibility); the listed portable
/// conventions are accepted; anything else warns.
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
784 
/// MSVC-flavored Windows ARM64: uses the Microsoft C++ ABI on top of the
/// common Windows ARM64 configuration.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
790 
/// Add MSVC's machine-identification macro on top of the Windows defines.
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}
796 
/// MSVC targets always use the Microsoft Win64 convention kind, regardless
/// of the -fclang-abi-compat setting.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
801 
802 unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
803   unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
804 
805   // MSVC does size based alignment for arm64 based on alignment section in
806   // below document, replicate that to keep alignment consistent with object
807   // files compiled by MSVC.
808   // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
809   if (TypeSize >= 512) {              // TypeSize >= 64 bytes
810     Align = std::max(Align, 128u);    // align type at least 16 bytes
811   } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
812     Align = std::max(Align, 64u);     // align type at least 8 butes
813   } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
814     Align = std::max(Align, 32u);     // align type at least 4 bytes
815   }
816   return Align;
817 }
818 
/// MinGW-flavored Windows ARM64: keeps the generic (Itanium-style) AArch64
/// C++ ABI rather than the Microsoft one.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
824 
/// Darwin (macOS/iOS/watchOS) AArch64 configuration: LP64-style 64-bit types
/// with Darwin deviations — long long int64, signed wchar_t, 64-bit long
/// double, and the WatchOS C++ ABI on 32-bit (arm64_32) subtargets.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is just double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 bitfield rules differ from the generic AAPCS ones set by the
    // base class.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::iOS64);
}
848 
/// Emit Darwin-specific AArch64 macros (arm64 spellings, architecture
/// revision, endianness), then the common Darwin OS macro set.
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
865 
/// Darwin AArch64 uses a plain char* va_list rather than the AAPCS64 struct.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
870 
// 64-bit RenderScript is aarch64: rebuild the triple with an "aarch64"
// architecture, preserving the incoming vendor/OS/environment, and mark the
// target as RenderScript.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
880 
/// Add the RenderScript identification macro, then the common AArch64 set.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
886