xref: /freebsd/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision 19261079b74319502c6ffa1249920079f0f69a72)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21 
22 using namespace clang;
23 using namespace clang::targets;
24 
// Table of all AArch64 target-specific builtins, in ID order: NEON first,
// then SVE, then the plain AArch64 builtins from BuiltinsAArch64.def.
// The order must match the enumeration used to index this table
// (see getTargetBuiltins below).
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42 
/// Construct the generic AArch64 target description: type widths,
/// alignments, FP formats, ABI defaults and OS-specific type choices.
/// Endian-specific and OS-specific subclasses refine this further.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD uses "long long" for int64_t/intmax_t; everyone else uses
  // "long" (and, outside Darwin/NetBSD, an unsigned wchar_t).
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // ILP32 variants (e.g. arm64_32) have 32-bit pointers and longs.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // AAPCS64 long double is IEEE quad precision.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling entry symbol differs per OS/EABI convention.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
109 
110 StringRef AArch64TargetInfo::getABI() const { return ABI; }
111 
112 bool AArch64TargetInfo::setABI(const std::string &Name) {
113   if (Name != "aapcs" && Name != "darwinpcs")
114     return false;
115 
116   ABI = Name;
117   return true;
118 }
119 
/// Parse and validate a branch-protection specification string (from
/// -mbranch-protection or the target attribute) into BPI.
/// Returns false, with Err pointing at the offending token, when the
/// specification does not parse.
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
                                                 BranchProtectionInfo &BPI,
                                                 StringRef &Err) const {
  llvm::AArch64::ParsedBranchProtection PBP;
  if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
    return false;

  // Map the parsed scope string onto the LangOptions enum; anything the
  // switch doesn't know means "don't sign return addresses".
  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  // Key selection defaults to the B key unless "a_key" was requested.
  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  return true;
}
141 
142 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
143   return Name == "generic" ||
144          llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
145 }
146 
147 bool AArch64TargetInfo::setCPU(const std::string &Name) {
148   return isValidCPUName(Name);
149 }
150 
/// Append every CPU name the AArch64 target parser knows to Values
/// (used for driver diagnostics listing valid -mcpu choices).
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
155 
156 void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
157                                                 MacroBuilder &Builder) const {
158   Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
159   Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
160   Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
161 }
162 
163 void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
164                                                 MacroBuilder &Builder) const {
165   // Also include the ARMv8.1 defines
166   getTargetDefinesARMV81A(Opts, Builder);
167 }
168 
169 void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
170                                                 MacroBuilder &Builder) const {
171   Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
172   Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
173   // Also include the Armv8.2 defines
174   getTargetDefinesARMV82A(Opts, Builder);
175 }
176 
177 void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
178                                                 MacroBuilder &Builder) const {
179   // Also include the Armv8.3 defines
180   getTargetDefinesARMV83A(Opts, Builder);
181 }
182 
183 void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
184                                                 MacroBuilder &Builder) const {
185   // Also include the Armv8.4 defines
186   getTargetDefinesARMV84A(Opts, Builder);
187 }
188 
189 void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
190                                                 MacroBuilder &Builder) const {
191   // Also include the Armv8.5 defines
192   // FIXME: Armv8.6 makes the following extensions mandatory:
193   // - __ARM_FEATURE_BF16
194   // - __ARM_FEATURE_MATMUL_INT8
195   // Handle them here.
196   getTargetDefinesARMV85A(Opts, Builder);
197 }
198 
199 void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
200                                                 MacroBuilder &Builder) const {
201   // Also include the Armv8.6 defines
202   getTargetDefinesARMV86A(Opts, Builder);
203 }
204 
/// Emit all AArch64 predefined macros: identification, data-model, ACLE
/// feature macros (keyed off the feature flags computed in
/// handleTargetFeatures), pointer-auth/BTI defaults, per-architecture
/// version macros, and SVE vector-length macros.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  // Uppercase the code-model name, e.g. "small" -> __AARCH64_CMODEL_SMALL__.
  // NOTE(review): the known code-model names are plain ASCII, so passing
  // char to toupper is safe here.
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", "8");
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  // SVE / SVE2 feature macros.  The SVE2 sub-extensions additionally
  // require SVE2 itself to be present.
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  if (HasCrypto)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  // FP16 vector arithmetic needs both NEON and fullfp16; scalar
  // arithmetic only needs fullfp16.
  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  // Architecture-version macros.  Each handler chains down to the one
  // below it, so e.g. v8.3 also gets the v8.2 and v8.1 macros.
  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_7A:
    getTargetDefinesARMV87A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // -msve-vector-bits fixes the SVE vector length at compile time.
  if (Opts.ArmSveVectorBits) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.ArmSveVectorBits));
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
  }
}
399 
// Expose the builtin table; its length is the count of AArch64-specific
// builtin IDs (everything between Builtin::FirstTSBuiltin and
// AArch64::LastTSBuiltin).
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}
404 
405 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
406   return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
407          (Feature == "neon" && (FPU & NeonMode)) ||
408          ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
409            Feature == "sve2-aes" || Feature == "sve2-sha3" ||
410            Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
411            Feature == "i8mm" || Feature == "bf16") &&
412           (FPU & SveMode));
413 }
414 
415 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
416                                              DiagnosticsEngine &Diags) {
417   FPU = FPUMode;
418   HasCRC = false;
419   HasCrypto = false;
420   HasUnaligned = true;
421   HasFullFP16 = false;
422   HasDotProd = false;
423   HasFP16FML = false;
424   HasMTE = false;
425   HasTME = false;
426   HasLS64 = false;
427   HasMatMul = false;
428   HasBFloat16 = false;
429   HasSVE2 = false;
430   HasSVE2AES = false;
431   HasSVE2SHA3 = false;
432   HasSVE2SM4 = false;
433   HasSVE2BitPerm = false;
434   HasMatmulFP64 = false;
435   HasMatmulFP32 = false;
436   HasLSE = false;
437 
438   ArchKind = llvm::AArch64::ArchKind::ARMV8A;
439 
440   for (const auto &Feature : Features) {
441     if (Feature == "+neon")
442       FPU |= NeonMode;
443     if (Feature == "+sve") {
444       FPU |= SveMode;
445       HasFullFP16 = 1;
446     }
447     if (Feature == "+sve2") {
448       FPU |= SveMode;
449       HasFullFP16 = 1;
450       HasSVE2 = 1;
451     }
452     if (Feature == "+sve2-aes") {
453       FPU |= SveMode;
454       HasFullFP16 = 1;
455       HasSVE2 = 1;
456       HasSVE2AES = 1;
457     }
458     if (Feature == "+sve2-sha3") {
459       FPU |= SveMode;
460       HasFullFP16 = 1;
461       HasSVE2 = 1;
462       HasSVE2SHA3 = 1;
463     }
464     if (Feature == "+sve2-sm4") {
465       FPU |= SveMode;
466       HasFullFP16 = 1;
467       HasSVE2 = 1;
468       HasSVE2SM4 = 1;
469     }
470     if (Feature == "+sve2-bitperm") {
471       FPU |= SveMode;
472       HasFullFP16 = 1;
473       HasSVE2 = 1;
474       HasSVE2BitPerm = 1;
475     }
476     if (Feature == "+f32mm") {
477       FPU |= SveMode;
478       HasMatmulFP32 = true;
479     }
480     if (Feature == "+f64mm") {
481       FPU |= SveMode;
482       HasMatmulFP64 = true;
483     }
484     if (Feature == "+crc")
485       HasCRC = true;
486     if (Feature == "+crypto")
487       HasCrypto = true;
488     if (Feature == "+strict-align")
489       HasUnaligned = false;
490     if (Feature == "+v8.1a")
491       ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
492     if (Feature == "+v8.2a")
493       ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
494     if (Feature == "+v8.3a")
495       ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
496     if (Feature == "+v8.4a")
497       ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
498     if (Feature == "+v8.5a")
499       ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
500     if (Feature == "+v8.6a")
501       ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
502     if (Feature == "+v8.7a")
503       ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
504     if (Feature == "+v8r")
505       ArchKind = llvm::AArch64::ArchKind::ARMV8R;
506     if (Feature == "+fullfp16")
507       HasFullFP16 = true;
508     if (Feature == "+dotprod")
509       HasDotProd = true;
510     if (Feature == "+fp16fml")
511       HasFP16FML = true;
512     if (Feature == "+mte")
513       HasMTE = true;
514     if (Feature == "+tme")
515       HasTME = true;
516     if (Feature == "+pauth")
517       HasPAuth = true;
518     if (Feature == "+i8mm")
519       HasMatMul = true;
520     if (Feature == "+bf16")
521       HasBFloat16 = true;
522     if (Feature == "+lse")
523       HasLSE = true;
524     if (Feature == "+ls64")
525       HasLS64 = true;
526     if (Feature == "+flagm")
527       HasFlagM = true;
528   }
529 
530   setDataLayout();
531 
532   return true;
533 }
534 
535 TargetInfo::CallingConvCheckResult
536 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
537   switch (CC) {
538   case CC_C:
539   case CC_Swift:
540   case CC_PreserveMost:
541   case CC_PreserveAll:
542   case CC_OpenCLKernel:
543   case CC_AArch64VectorCall:
544   case CC_Win64:
545     return CCCR_OK;
546   default:
547     return CCCR_Warning;
548   }
549 }
550 
551 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
552 
// Generic AArch64 uses the AAPCS64 structure-based va_list.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
556 
// Register names accepted in GCC-style inline assembly and register
// variables, grouped by register class.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
592 
// Hand the full register-name table to the generic TargetInfo machinery.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}
596 
// Alternative spellings accepted for register names.  w31/x31 name the
// stack pointer in the respective width, and GCC-style rN names map to
// the xN registers.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
635 
// Hand the register-alias table to the generic TargetInfo machinery.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}
639 
/// Validate a single GCC inline-assembly constraint letter (advancing
/// Name past multi-character constraints) and record whether it allows a
/// register or memory operand.  Returns false for unsupported constraints.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    // "U" starts a two-letter constraint; only the SVE predicate forms
    // are accepted here.
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
  // Unreachable: every case above returns.  Kept to satisfy compilers
  // that warn about control reaching the end of a non-void function.
  return false;
}
690 
/// Check an operand-size modifier against a register constraint.  For
/// 'r'/'z' operands, a sub-64-bit value without an explicit 'x'/'w'
/// modifier is rejected and "w" is suggested, since plain 'r' binds to
/// the 64-bit x-registers.
bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
721 
722 const char *AArch64TargetInfo::getClobbers() const { return ""; }
723 
724 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
725   if (RegNo == 0)
726     return 0;
727   if (RegNo == 1)
728     return 1;
729   return -1;
730 }
731 
732 bool AArch64TargetInfo::hasInt128Type() const { return true; }
733 
// Little-endian AArch64: everything is inherited from the generic base.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
737 
738 void AArch64leTargetInfo::setDataLayout() {
739   if (getTriple().isOSBinFormatMachO()) {
740     if(getTriple().isArch32Bit())
741       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128");
742     else
743       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128");
744   } else
745     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
746 }
747 
748 void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
749                                            MacroBuilder &Builder) const {
750   Builder.defineMacro("__AARCH64EL__");
751   AArch64TargetInfo::getTargetDefines(Opts, Builder);
752 }
753 
// Big-endian AArch64: everything is inherited from the generic base.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
757 
758 void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
759                                            MacroBuilder &Builder) const {
760   Builder.defineMacro("__AARCH64EB__");
761   Builder.defineMacro("__AARCH_BIG_ENDIAN");
762   Builder.defineMacro("__ARM_BIG_ENDIAN");
763   AArch64TargetInfo::getTargetDefines(Opts, Builder);
764 }
765 
// Big-endian layout; Mach-O is little-endian only, so it must not reach here.
void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
770 
/// Windows on ARM64: applies the LLP64 data model on top of the
/// little-endian AArch64 description.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // long double is plain IEEE double on Windows.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // 64-bit quantities (and pointer-sized types) are "long long" based.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
788 
789 void WindowsARM64TargetInfo::setDataLayout() {
790   resetDataLayout(Triple.isOSBinFormatMachO()
791                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
792                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128");
793 }
794 
// Windows ARM64 uses a simple char* va_list instead of the AAPCS64 struct.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
799 
/// Calling-convention policy for Windows ARM64: x86-specific conventions
/// are silently ignored (MSVC compatibility), the common set is accepted,
/// and anything else gets a warning.
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
819 
// MSVC-environment ARM64: Windows ARM64 plus the Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
825 
826 void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
827                                                 MacroBuilder &Builder) const {
828   WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
829   Builder.defineMacro("_M_ARM64", "1");
830 }
831 
// MSVC-compatible calling-convention classification, regardless of the
// Clang ABI compatibility setting (ClangABICompat4 is intentionally unused).
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
836 
/// Minimum alignment (in bits) for a global of the given size (in bits),
/// raised to match MSVC's size-based alignment for arm64.
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
853 
// MinGW ARM64: Windows ARM64 data model but the Itanium-style generic
// AArch64 C++ ABI (GCC compatibility) rather than the Microsoft one.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
859 
/// Darwin (macOS/iOS/watchOS) arm64: adjusts types, long double format
/// and bitfield rules relative to generic little-endian AArch64.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // Darwin's long double is plain IEEE double, not IEEE quad.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  // 32-bit Darwin arm64 (presumably arm64_32 watchOS — confirm against
  // the driver) uses different bitfield rules and the WatchOS C++ ABI.
  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
883 
/// Emit Darwin-specific predefined macros (__arm64__, architecture
/// variant, endianness), then the shared Darwin platform defines.
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  // 32-bit vs 64-bit Darwin arm64 use distinct architecture macros.
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
903 
// Darwin arm64 uses a char* va_list rather than the AAPCS64 struct form.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
908 
// 64-bit RenderScript is aarch64: rewrite the triple's architecture to
// "aarch64" (keeping vendor/OS/environment) and mark the target.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
918 
919 void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
920                                                 MacroBuilder &Builder) const {
921   Builder.defineMacro("__RENDERSCRIPT__");
922   AArch64leTargetInfo::getTargetDefines(Opts, Builder);
923 }
924