xref: /freebsd/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision 8ddb146abcdf061be9f2c0db7e391697dafad85c)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21 
22 using namespace clang;
23 using namespace clang::targets;
24 
// Table of all AArch64 target-specific builtins, built from the .def files in
// the order getTargetBuiltins() expects: NEON first, then SVE, then the
// remaining AArch64 builtins (including language-gated and header-gated ones).
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42 
43 static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
44   switch (Kind) {
45   case llvm::AArch64::ArchKind::ARMV9A:
46   case llvm::AArch64::ArchKind::ARMV9_1A:
47   case llvm::AArch64::ArchKind::ARMV9_2A:
48   case llvm::AArch64::ArchKind::ARMV9_3A:
49     return "9";
50   default:
51     return "8";
52   }
53 }
54 
55 StringRef AArch64TargetInfo::getArchProfile() const {
56   switch (ArchKind) {
57   case llvm::AArch64::ArchKind::ARMV8R:
58     return "R";
59   default:
60     return "A";
61   }
62 }
63 
// Configure the baseline AArch64 target properties: integer/pointer widths,
// long double format, SVE/MSVC va_list availability, bitfield rules, C++ ABI
// and the profiling-counter symbol name. The default ABI is "aapcs".
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD spells int64_t/intmax_t as "long long"; other OSes use "long",
  // and everything except Darwin and NetBSD makes wchar_t unsigned.
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // 32-bit triples (e.g. arm64_32) keep ILP32-style long/pointer widths.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  // 128-bit vectors and 128-bit (LDXP/STXP-sized) atomics are supported.
  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE binary128 under AAPCS.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling hook name: Linux uses "\01_mcount"; bare-metal picks between
  // the GNU and default spellings based on the requested EABI version.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
130 
// Return the name of the ABI currently in effect ("aapcs" or "darwinpcs").
StringRef AArch64TargetInfo::getABI() const { return ABI; }
132 
133 bool AArch64TargetInfo::setABI(const std::string &Name) {
134   if (Name != "aapcs" && Name != "darwinpcs")
135     return false;
136 
137   ABI = Name;
138   return true;
139 }
140 
// Parse a -mbranch-protection specification string and fill BPI with the
// resulting return-address-signing scope, signing key and BTI setting.
// Returns false (with Err describing the problem, set by the parser) when
// the spec string is malformed.
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
                                                 BranchProtectionInfo &BPI,
                                                 StringRef &Err) const {
  llvm::ARM::ParsedBranchProtection PBP;
  if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
    return false;

  // "non-leaf"/"all" select the signing scope; any other scope string means
  // no return-address signing.
  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  // Anything other than "a_key" falls back to the B key.
  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  return true;
}
162 
163 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
164   return Name == "generic" ||
165          llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
166 }
167 
// Accept or reject a -mcpu name. Delegates entirely to isValidCPUName();
// no CPU-specific state is recorded here.
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
171 
// Append every CPU/architecture name known to the AArch64 target parser.
// NOTE(review): isValidCPUName() additionally accepts "generic", which is
// only listed here if the target parser itself includes it — verify.
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
176 
// Predefine the ACLE feature macros that this file treats as implied by
// Armv8.1-A (QRDMX, LSE atomics, CRC32).
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
  Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
  Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}
183 
// Armv8.2-A adds no extra unconditional macros here; it only inherits the
// Armv8.1-A set.
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
189 
// Armv8.3-A adds complex-number and JavaScript-conversion (JCVT) macros on
// top of the Armv8.2-A set.
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}
197 
// Armv8.4-A adds no extra unconditional macros here; it only inherits the
// Armv8.3-A set.
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
203 
// Armv8.5-A adds the FRINT (rounding to integral) macro on top of the
// Armv8.4-A set.
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
210 
// Armv8.6-A currently only inherits the Armv8.5-A set; the mandatory BF16
// and INT8 matmul macros are still driven by feature flags elsewhere.
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
220 
// Armv8.7-A adds no extra unconditional macros here; it only inherits the
// Armv8.6-A set.
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
226 
// Armv8.8-A adds no extra unconditional macros here; it only inherits the
// Armv8.7-A set.
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}
232 
// Armv9-A reuses the Armv8.5-A macro set (its Armv8 baseline).
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}
238 
// Armv9.1-A reuses the Armv8.6-A macro set (its Armv8 baseline).
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}
244 
// Armv9.2-A reuses the Armv8.7-A macro set (its Armv8 baseline).
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}
250 
// Armv9.3-A reuses the Armv8.8-A macro set (its Armv8 baseline).
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
256 
// Emit all AArch64 predefined macros: target identification, data-model and
// code-model macros, the unconditional ACLE baseline, feature-flag-gated
// ACLE macros, architecture-version-specific macros, and the GCC sync
// builtin markers. The emission order is part of the observable predefines
// buffer, so statements must not be reordered casually.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  // Upper-case the code-model name, e.g. __AARCH64_CMODEL_SMALL__.
  // NOTE(review): toupper() on a plain (possibly signed) char is undefined
  // for negative values; safe here only because code-model names are ASCII.
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + getArchProfile() + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  // WCharSize of 0 means "not set explicitly"; report the default of 4.
  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  // The SVE2 sub-extension macros all additionally require SVE2 itself.
  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  // FP16 vector arithmetic also needs NEON; scalar arithmetic does not.
  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  // Dispatch to the per-version helpers, which chain down to the base set.
  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_7A:
    getTargetDefinesARMV87A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_8A:
    getTargetDefinesARMV88A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9A:
    getTargetDefinesARMV9A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_1A:
    getTargetDefinesARMV91A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_2A:
    getTargetDefinesARMV92A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_3A:
    getTargetDefinesARMV93A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // A fixed vscale (min == max, non-zero) pins the SVE vector length,
  // enabling the fixed-length SVE vector extensions.
  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
  }
}
490 
// Expose the BuiltinInfo table; its element count must match the number of
// AArch64 target-specific builtin IDs.
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}
495 
496 Optional<std::pair<unsigned, unsigned>>
497 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
498   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
499     return std::pair<unsigned, unsigned>(
500         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
501 
502   if (hasFeature("sve"))
503     return std::pair<unsigned, unsigned>(1, 16);
504 
505   return None;
506 }
507 
508 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
509   return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
510          (Feature == "neon" && (FPU & NeonMode)) ||
511          ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
512            Feature == "sve2-aes" || Feature == "sve2-sha3" ||
513            Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
514            Feature == "i8mm" || Feature == "bf16") &&
515           (FPU & SveMode)) ||
516          (Feature == "ls64" && HasLS64);
517 }
518 
// Translate the driver's resolved "+feature" strings into the target's
// boolean feature state. All flags are reset to their defaults first, so a
// feature is on only if explicitly listed; later entries win where features
// overlap (e.g. the architecture-version entries all write ArchKind).
// Always returns true; finishes by installing the data layout.
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  FPU = FPUMode;
  HasCRC = false;
  HasCrypto = false;
  HasAES = false;
  HasSHA2 = false;
  HasSHA3 = false;
  HasSM4 = false;
  HasUnaligned = true;
  HasFullFP16 = false;
  HasDotProd = false;
  HasFP16FML = false;
  HasMTE = false;
  HasTME = false;
  HasLS64 = false;
  HasRandGen = false;
  HasMatMul = false;
  HasBFloat16 = false;
  HasSVE2 = false;
  HasSVE2AES = false;
  HasSVE2SHA3 = false;
  HasSVE2SM4 = false;
  HasSVE2BitPerm = false;
  HasMatmulFP64 = false;
  HasMatmulFP32 = false;
  HasLSE = false;
  HasHBC = false;
  HasMOPS = false;

  // Remains INVALID unless an explicit "+v8a".."+v9.3a"/"+v8r" entry appears.
  ArchKind = llvm::AArch64::ArchKind::INVALID;

  for (const auto &Feature : Features) {
    if (Feature == "+neon")
      FPU |= NeonMode;
    // Every SVE/SVE2 sub-feature implies SVE mode and full FP16 support.
    if (Feature == "+sve") {
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2-aes") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2AES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2BitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= SveMode;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= SveMode;
      HasMatmulFP64 = true;
    }
    if (Feature == "+crc")
      HasCRC = true;
    // NOTE(review): "+crypto" sets only HasCrypto here; it does not imply
    // the aes/sha2 sub-features — those are expected as separate entries.
    if (Feature == "+crypto")
      HasCrypto = true;
    if (Feature == "+aes")
      HasAES = true;
    if (Feature == "+sha2")
      HasSHA2 = true;
    // SHA3 implies SHA2.
    if (Feature == "+sha3") {
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+sm4")
      HasSM4 = true;
    if (Feature == "+strict-align")
      HasUnaligned = false;
    if (Feature == "+v8a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8A;
    if (Feature == "+v8.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
    if (Feature == "+v8.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
    if (Feature == "+v8.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
    if (Feature == "+v8.4a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
    if (Feature == "+v8.5a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
    if (Feature == "+v8.6a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
    if (Feature == "+v8.7a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
    if (Feature == "+v8.8a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_8A;
    if (Feature == "+v9a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9A;
    if (Feature == "+v9.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
    if (Feature == "+v9.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
    if (Feature == "+v9.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_3A;
    if (Feature == "+v8r")
      ArchKind = llvm::AArch64::ArchKind::ARMV8R;
    if (Feature == "+fullfp16")
      HasFullFP16 = true;
    if (Feature == "+dotprod")
      HasDotProd = true;
    if (Feature == "+fp16fml")
      HasFP16FML = true;
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+hbc")
      HasHBC = true;
  }

  setDataLayout();

  return true;
}
671 
672 TargetInfo::CallingConvCheckResult
673 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
674   switch (CC) {
675   case CC_C:
676   case CC_Swift:
677   case CC_SwiftAsync:
678   case CC_PreserveMost:
679   case CC_PreserveAll:
680   case CC_OpenCLKernel:
681   case CC_AArch64VectorCall:
682   case CC_Win64:
683     return CCCR_OK;
684   default:
685     return CCCR_Warning;
686   }
687 }
688 
// __builtin_clz(0) is not treated as undefined on this target.
bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
690 
// Use the AArch64 ABI's dedicated __builtin_va_list representation rather
// than a plain char*/void* va_list.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
694 
// Register names accepted in GCC-style inline assembly, grouped by class:
// w/x integer views, s/d scalar FP views, v NEON vectors, z SVE vectors and
// p SVE predicates.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
730 
// Hand the full GCC-style register name table to the common TargetInfo code.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}
734 
// Alternate spellings for registers in inline assembly: w31/x31 for the
// stack pointer views, and GCC's rN names for the xN registers.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
773 
// Hand the register-alias table to the common TargetInfo code.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}
777 
778 bool AArch64TargetInfo::validateAsmConstraint(
779     const char *&Name, TargetInfo::ConstraintInfo &Info) const {
780   switch (*Name) {
781   default:
782     return false;
783   case 'w': // Floating point and SIMD registers (V0-V31)
784     Info.setAllowsRegister();
785     return true;
786   case 'I': // Constant that can be used with an ADD instruction
787   case 'J': // Constant that can be used with a SUB instruction
788   case 'K': // Constant that can be used with a 32-bit logical instruction
789   case 'L': // Constant that can be used with a 64-bit logical instruction
790   case 'M': // Constant that can be used as a 32-bit MOV immediate
791   case 'N': // Constant that can be used as a 64-bit MOV immediate
792   case 'Y': // Floating point constant zero
793   case 'Z': // Integer constant zero
794     return true;
795   case 'Q': // A memory reference with base register and no offset
796     Info.setAllowsMemory();
797     return true;
798   case 'S': // A symbolic address
799     Info.setAllowsRegister();
800     return true;
801   case 'U':
802     if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
803       // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
804       Info.setAllowsRegister();
805       Name += 2;
806       return true;
807     }
808     // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
809     // Utf: A memory address suitable for ldp/stp in TF mode.
810     // Usa: An absolute symbolic address.
811     // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
812 
813     // Better to return an error saying that it's an unrecognised constraint
814     // even if this is a valid constraint in gcc.
815     return false;
816   case 'z': // Zero register, wzr or xzr
817     Info.setAllowsRegister();
818     return true;
819   case 'x': // Floating point and SIMD registers (V0-V15)
820     Info.setAllowsRegister();
821     return true;
822   case 'y': // SVE registers (V0-V7)
823     Info.setAllowsRegister();
824     return true;
825   }
826   return false;
827 }
828 
// Check whether an operand modifier is sensible for the given constraint
// and operand size; only 'r'/'z' constraints are size-checked here. On
// rejection, SuggestedModifier names the modifier to use instead.
bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      // 512-bit operands are only usable with the LS64 extension.
      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
862 
// No registers are implicitly clobbered by inline assembly on this target.
const char *AArch64TargetInfo::getClobbers() const { return ""; }
864 
// Map exception-handling data register indices: slots 0 and 1 exist and map
// to themselves; any other index has no EH data register (-1).
int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
  if (RegNo == 0)
    return 0;
  if (RegNo == 1)
    return 1;
  return -1;
}
872 
// __int128 is always available on AArch64.
bool AArch64TargetInfo::hasInt128Type() const { return true; }
874 
// Little-endian AArch64: inherits everything from the generic target;
// the data layout is chosen in setDataLayout().
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
878 
879 void AArch64leTargetInfo::setDataLayout() {
880   if (getTriple().isOSBinFormatMachO()) {
881     if(getTriple().isArch32Bit())
882       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
883     else
884       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
885   } else
886     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
887 }
888 
// Define the little-endian marker, then delegate to the shared AArch64
// target defines.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
894 
// Big-endian AArch64 target: configuration is inherited from the generic
// AArch64 target; endianness-specific pieces are in the overrides below.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
898 
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // __AARCH64EB__ is the AArch64-specific big-endian marker; __ARM_BIG_ENDIAN
  // is the ACLE spelling, and __AARCH_BIG_ENDIAN is kept alongside it for
  // compatibility.
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
906 
void AArch64beTargetInfo::setDataLayout() {
  // Big-endian Mach-O is not supported, so only the ELF layout string
  // (leading 'E' = big-endian) is needed here.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
911 
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // long double is the 64-bit IEEE double format on Windows.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // With 32-bit long, the widest standard integer types map to long long.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  // Pointer-sized types remain 64-bit even though long is 32-bit (LLP64).
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
929 
930 void WindowsARM64TargetInfo::setDataLayout() {
931   resetDataLayout(Triple.isOSBinFormatMachO()
932                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
933                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
934                   Triple.isOSBinFormatMachO() ? "_" : "");
935 }
936 
// Windows on ARM64 uses a plain char* va_list, matching MSVC.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
941 
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  // x86-specific conventions are silently ignored, so portable code that
  // spells __stdcall and friends still compiles on ARM64.
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  // Conventions that have a meaningful ARM64 lowering are accepted as-is.
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    // Anything else is unsupported here; warn and use the default.
    return CCCR_Warning;
  }
}
962 
// MSVC-compatible Windows/ARM64 target: uses the Microsoft C++ ABI on top of
// the common Windows ARM64 configuration.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
968 
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  // MSVC's predefined architecture macro for ARM64.
  Builder.defineMacro("_M_ARM64", "1");
}
974 
// The Microsoft Win64 calling-convention kind applies regardless of the
// Clang 4 ABI-compatibility setting, so the parameter is intentionally
// unused.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
979 
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  // (TypeSize is in bits; Align values below are also in bits.)
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
996 
// MinGW targets the Itanium-style generic AArch64 C++ ABI instead of the
// Microsoft ABI used by MSVC-compatible environments.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1002 
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // arm64_32 (32-bit pointers) still keeps a 64-bit intmax_t.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is the 64-bit IEEE double format on Darwin arm64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 keeps 32-bit-ARM-style bitfield layout rules and uses the
    // WatchOS C++ ABI; 64-bit Darwin uses the AppleARM64 ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1026 
1027 void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
1028                                            const llvm::Triple &Triple,
1029                                            MacroBuilder &Builder) const {
1030   Builder.defineMacro("__AARCH64_SIMD__");
1031   if (Triple.isArch32Bit())
1032     Builder.defineMacro("__ARM64_ARCH_8_32__");
1033   else
1034     Builder.defineMacro("__ARM64_ARCH_8__");
1035   Builder.defineMacro("__ARM_NEON__");
1036   Builder.defineMacro("__LITTLE_ENDIAN__");
1037   Builder.defineMacro("__REGISTER_PREFIX__", "");
1038   Builder.defineMacro("__arm64", "1");
1039   Builder.defineMacro("__arm64__", "1");
1040 
1041   if (Triple.isArm64e())
1042     Builder.defineMacro("__arm64e__", "1");
1043 
1044   getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
1045 }
1046 
// Darwin arm64 uses a plain char* va_list (unlike the AAPCS struct form).
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1051 
// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  // The base-class triple above rewrites the architecture to plain "aarch64"
  // while preserving the vendor/OS/environment components.
  IsRenderScriptTarget = true;
}
1061 
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Advertise RenderScript, then inherit the standard little-endian AArch64
  // defines.
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1067