xref: /freebsd/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision 397e83df75e0fcd0d3fcb95ae4d794cb7600fc89)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/TargetParser/AArch64TargetParser.h"
21 #include "llvm/TargetParser/ARMTargetParserCommon.h"
22 #include <optional>
23 
24 using namespace clang;
25 using namespace clang::targets;
26 
// Table of all AArch64 target builtins — the NEON, SVE and SME intrinsic
// sets followed by the generic AArch64 builtins — in the order the
// clang::AArch64 builtin IDs expect.  Each section re-defines the expansion
// macros before including its .def list (the .def files presumably #undef
// them after use — standard clang Builtins*.def convention).
static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSME.def"

// The generic AArch64 list additionally uses language-restricted and
// header-requiring builtin flavors.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
56 
57 void AArch64TargetInfo::setArchFeatures() {
58   if (*ArchInfo == llvm::AArch64::ARMV8R) {
59     HasDotProd = true;
60     HasDIT = true;
61     HasFlagM = true;
62     HasRCPC = true;
63     FPU |= NeonMode;
64     HasCCPP = true;
65     HasCRC = true;
66     HasLSE = true;
67     HasRDM = true;
68   } else if (ArchInfo->Version.getMajor() == 8) {
69     if (ArchInfo->Version.getMinor() >= 7u) {
70       HasWFxT = true;
71     }
72     if (ArchInfo->Version.getMinor() >= 6u) {
73       HasBFloat16 = true;
74       HasMatMul = true;
75     }
76     if (ArchInfo->Version.getMinor() >= 5u) {
77       HasAlternativeNZCV = true;
78       HasFRInt3264 = true;
79       HasSSBS = true;
80       HasSB = true;
81       HasPredRes = true;
82       HasBTI = true;
83     }
84     if (ArchInfo->Version.getMinor() >= 4u) {
85       HasDotProd = true;
86       HasDIT = true;
87       HasFlagM = true;
88     }
89     if (ArchInfo->Version.getMinor() >= 3u) {
90       HasRCPC = true;
91       FPU |= NeonMode;
92     }
93     if (ArchInfo->Version.getMinor() >= 2u) {
94       HasCCPP = true;
95     }
96     if (ArchInfo->Version.getMinor() >= 1u) {
97       HasCRC = true;
98       HasLSE = true;
99       HasRDM = true;
100     }
101   } else if (ArchInfo->Version.getMajor() == 9) {
102     if (ArchInfo->Version.getMinor() >= 2u) {
103       HasWFxT = true;
104     }
105     if (ArchInfo->Version.getMinor() >= 1u) {
106       HasBFloat16 = true;
107       HasMatMul = true;
108     }
109     FPU |= SveMode;
110     HasSVE2 = true;
111     HasFullFP16 = true;
112     HasAlternativeNZCV = true;
113     HasFRInt3264 = true;
114     HasSSBS = true;
115     HasSB = true;
116     HasPredRes = true;
117     HasBTI = true;
118     HasDotProd = true;
119     HasDIT = true;
120     HasFlagM = true;
121     HasRCPC = true;
122     FPU |= NeonMode;
123     HasCCPP = true;
124     HasCRC = true;
125     HasLSE = true;
126     HasRDM = true;
127   }
128 }
129 
// Constructor: establishes the AArch64 type model, ABI defaults and
// target-wide properties that do not depend on the feature string.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD uses "long long" for int64_t/intmax_t; all other OSes use 64-bit
  // "long", and wchar_t is unsigned except on Darwin and NetBSD.
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HalfArgsAndReturns = true;
  HasFloat16 = true;
  HasStrictFP = true;

  // ILP32 (arm64_32 / -mabi=ilp32) keeps long and pointers at 32 bits.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE binary128 per AAPCS64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling counter symbol: Linux uses "\01_mcount"; bare-metal picks
  // between that and plain "mcount" based on the requested EABI flavor.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
198 
199 StringRef AArch64TargetInfo::getABI() const { return ABI; }
200 
201 bool AArch64TargetInfo::setABI(const std::string &Name) {
202   if (Name != "aapcs" && Name != "darwinpcs")
203     return false;
204 
205   ABI = Name;
206   return true;
207 }
208 
209 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
210                                                  BranchProtectionInfo &BPI,
211                                                  StringRef &Err) const {
212   llvm::ARM::ParsedBranchProtection PBP;
213   if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
214     return false;
215 
216   BPI.SignReturnAddr =
217       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
218           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
219           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
220           .Default(LangOptions::SignReturnAddressScopeKind::None);
221 
222   if (PBP.Key == "a_key")
223     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
224   else
225     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
226 
227   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
228   BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
229   BPI.GuardedControlStack = PBP.GuardedControlStack;
230   return true;
231 }
232 
233 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
234   return Name == "generic" || llvm::AArch64::parseCpu(Name);
235 }
236 
// Accept the CPU name if it is valid; no other per-CPU state is recorded
// here (feature selection happens through the target-feature machinery).
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
240 
// Populate Values with the CPU names this target accepts.
// NOTE(review): delegates to fillValidCPUArchList despite the name
// difference — confirm against the TargetParser that this lists CPUs.
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
245 
// Armv8.1-A macro set: QRDMX advertises the rounding-doubling multiply
// intrinsics that become mandatory at this level.
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}
250 
// Armv8.2-A adds no macros of its own here; it only pulls in the v8.1 set.
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
256 
// Armv8.3-A macro set: complex-number arithmetic and the JavaScript
// conversion (FJCVTZS) feature become architectural.
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}
264 
// Armv8.4-A adds no macros of its own here; it only pulls in the v8.3 set.
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
270 
// Armv8.5-A macro set: FRINT32/FRINT64 rounding and Branch Target
// Identification become architectural.
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  Builder.defineMacro("__ARM_FEATURE_BTI", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
278 
// Armv8.6-A currently only chains to the v8.5 set; the mandatory BF16 and
// Int8 matrix-multiply macros are still emitted via the feature flags in
// getTargetDefines rather than here (see FIXME).
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
288 
// Armv8.7-A adds no macros of its own here; it only pulls in the v8.6 set.
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
294 
// Armv8.8-A adds no macros of its own here; it only pulls in the v8.7 set.
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}
300 
// Armv8.9-A adds no macros of its own here; it only pulls in the v8.8 set.
void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}
306 
// Armv9-A inherits the Armv8.5-A macro set (its architectural baseline).
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}
312 
// Armv9.1-A inherits the Armv8.6-A macro set (its architectural baseline).
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}
318 
// Armv9.2-A inherits the Armv8.7-A macro set (its architectural baseline).
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}
324 
// Armv9.3-A inherits the Armv8.8-A macro set (its architectural baseline).
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
330 
// Armv9.4-A inherits the Armv8.9-A macro set (its architectural baseline).
void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}
336 
// Armv9.5-A chains to the v9.4 set; it is the first v9 level without a
// corresponding v8.x baseline.
void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
  getTargetDefinesARMV94A(Opts, Builder);
}
342 
// Emit every predefined macro for this target: target identification, the
// ACLE baseline macros that are fixed on AArch64, feature-conditional ACLE
// macros, the architecture-version macro chain, and GCC-compatibility
// macros.  Macro emission order is observable (e.g. via -dM) — preserve it.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  if (getTriple().isWindowsArm64EC()) {
    // Define the same set of macros as would be defined on x86_64 to ensure that
    // ARM64EC datatype layouts match those of x86_64 compiled code
    Builder.defineMacro("__amd64__");
    Builder.defineMacro("__amd64");
    Builder.defineMacro("__x86_64");
    Builder.defineMacro("__x86_64__");
    Builder.defineMacro("__arm64ec__");
  } else {
    Builder.defineMacro("__aarch64__");
  }

  // Inline assembly supports AArch64 flag outputs.
  Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");

  // Advertise the code model, e.g. __AARCH64_CMODEL_SMALL__.
  // NOTE(review): toupper on a plain char is UB for negative values; safe
  // here only because code-model names are ASCII.
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH",
                      std::to_string(ArchInfo->Version.getMajor()));
  Builder.defineMacro("__ARM_ARCH_PROFILE",
                      std::string("'") + (char)ArchInfo->Profile + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // These macros are set when Clang can parse declarations with these
  // attributes.
  Builder.defineMacro("__ARM_STATE_ZA", "1");
  Builder.defineMacro("__ARM_STATE_ZT0", "1");

  // 0xe implies support for half, single and double precision operations.
  if (FPU & FPUMode)
    Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  // NOTE(review): __ARM_FEATURE_SME is deliberately defined without a value
  // here, unlike most other feature macros — confirm this matches ACLE.
  if (HasSME) {
    Builder.defineMacro("__ARM_FEATURE_SME");
    Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
  }

  if (HasSME2) {
    Builder.defineMacro("__ARM_FEATURE_SME");
    Builder.defineMacro("__ARM_FEATURE_SME2");
    Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
  }

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // RCPC3 subsumes RCPC; advertise the highest supported level.
  if (HasRCPC3)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
  else if (HasRCPC)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "1");

  if (HasFMV)
    Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasPAuth)
    Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  // FP16 vector arithmetic additionally requires NEON to be enabled.
  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (Opts.GuardedControlStack)
    Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  if (HasD128)
    Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");

  if (HasGCS)
    Builder.defineMacro("__ARM_FEATURE_GCS", "1");

  // Dispatch to the per-architecture-version macro chain; each helper also
  // emits the macros of the versions it subsumes.
  if (*ArchInfo == llvm::AArch64::ARMV8_1A)
    getTargetDefinesARMV81A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
    getTargetDefinesARMV82A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
    getTargetDefinesARMV83A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
    getTargetDefinesARMV84A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
    getTargetDefinesARMV85A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
    getTargetDefinesARMV86A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
    getTargetDefinesARMV87A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
    getTargetDefinesARMV88A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
    getTargetDefinesARMV89A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9A)
    getTargetDefinesARMV9A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
    getTargetDefinesARMV91A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
    getTargetDefinesARMV92A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
    getTargetDefinesARMV93A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
    getTargetDefinesARMV94A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
    getTargetDefinesARMV95A(Opts, Builder);

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");

  // Allow detection of fast FMA support.
  Builder.defineMacro("__FP_FAST_FMA", "1");
  Builder.defineMacro("__FP_FAST_FMAF", "1");

  // C/C++ operators work on both VLS and VLA SVE types
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");

  // A fixed vscale (-mvscale-min == -mvscale-max) fixes the SVE vector
  // length, which __ARM_FEATURE_SVE_BITS reports in bits.
  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
  }
}
619 
620 ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
621   return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
622                                          Builtin::FirstTSBuiltin);
623 }
624 
625 std::optional<std::pair<unsigned, unsigned>>
626 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
627   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
628     return std::pair<unsigned, unsigned>(
629         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
630 
631   if (hasFeature("sve"))
632     return std::pair<unsigned, unsigned>(1, 16);
633 
634   return std::nullopt;
635 }
636 
637 unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
638   if (Name == "default")
639     return 0;
640   if (auto Ext = llvm::AArch64::parseArchExtension(Name))
641     return Ext->FmvPriority;
642   return 0;
643 }
644 
// Per-feature cost used when ranking multi-versioned candidates.
unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
  // Take the maximum priority as per feature cost, so more features win.
  return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
}
649 
650 bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
651   if (auto Ext = llvm::AArch64::parseArchExtension(Name))
652     return !Ext->DependentFeatures.empty();
653   return false;
654 }
655 
656 StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
657   if (auto Ext = llvm::AArch64::parseArchExtension(Name))
658     return Ext->DependentFeatures;
659   return StringRef();
660 }
661 
// __builtin_cpu_supports accepts any extension name known to the AArch64
// TargetParser.
bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
  return llvm::AArch64::parseArchExtension(FeatureStr).has_value();
}
665 
// Query whether a named target feature is enabled.  Names follow the
// -target-feature / __attribute__((target)) spelling; several entries accept
// aliases (e.g. "aes"/"pmull", "fp16"/"fullfp16", "memtag"/"memtag2"), and
// the SVE-dependent entries additionally require SveMode to be active.
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("fmv", HasFMV)
      .Cases("neon", "fp", "simd", FPU & NeonMode)
      .Case("jscvt", HasJSCVT)
      .Case("fcma", HasFCMA)
      .Case("rng", HasRandGen)
      .Case("flagm", HasFlagM)
      .Case("flagm2", HasAlternativeNZCV)
      .Case("fp16fml", HasFP16FML)
      .Case("dotprod", HasDotProd)
      .Case("sm4", HasSM4)
      .Case("rdm", HasRDM)
      .Case("lse", HasLSE)
      .Case("crc", HasCRC)
      .Case("sha2", HasSHA2)
      .Case("sha3", HasSHA3)
      .Cases("aes", "pmull", HasAES)
      .Cases("fp16", "fullfp16", HasFullFP16)
      .Case("dit", HasDIT)
      .Case("dpb", HasCCPP)
      .Case("dpb2", HasCCDP)
      .Case("rcpc", HasRCPC)
      .Case("frintts", HasFRInt3264)
      .Case("i8mm", HasMatMul)
      .Case("bf16", HasBFloat16)
      .Case("sve", FPU & SveMode)
      .Case("sve-bf16", FPU & SveMode && HasBFloat16)
      .Case("sve-i8mm", FPU & SveMode && HasMatMul)
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
      .Case("sve2", FPU & SveMode && HasSVE2)
      .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
      .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
      .Case("sme", HasSME)
      .Case("sme2", HasSME2)
      .Case("sme-f64f64", HasSMEF64F64)
      .Case("sme-i16i64", HasSMEI16I64)
      .Case("sme-fa64", HasSMEFA64)
      .Cases("memtag", "memtag2", HasMTE)
      .Case("sb", HasSB)
      .Case("predres", HasPredRes)
      .Cases("ssbs", "ssbs2", HasSSBS)
      .Case("bti", HasBTI)
      .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
      .Case("wfxt", HasWFxT)
      .Case("rcpc3", HasRCPC3)
      .Default(false);
}
718 
719 void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
720                                           StringRef Name, bool Enabled) const {
721   Features[Name] = Enabled;
722   // If the feature is an architecture feature (like v8.2a), add all previous
723   // architecture versions and any dependant target features.
724   const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
725       llvm::AArch64::ArchInfo::findBySubArch(Name);
726 
727   if (!ArchInfo)
728     return; // Not an architecture, nothing more to do.
729 
730   // Disabling an architecture feature does not affect dependent features
731   if (!Enabled)
732     return;
733 
734   for (const auto *OtherArch : llvm::AArch64::ArchInfos)
735     if (ArchInfo->implies(*OtherArch))
736       Features[OtherArch->getSubArch()] = true;
737 
738   // Set any features implied by the architecture
739   std::vector<StringRef> CPUFeats;
740   if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
741     for (auto F : CPUFeats) {
742       assert(F[0] == '+' && "Expected + in target feature!");
743       Features[F.drop_front(1)] = true;
744     }
745   }
746 }
747 
748 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
749                                              DiagnosticsEngine &Diags) {
750   for (const auto &Feature : Features) {
751     if (Feature == "-fp-armv8")
752       HasNoFP = true;
753     if (Feature == "-neon")
754       HasNoNeon = true;
755     if (Feature == "-sve")
756       HasNoSVE = true;
757 
758     if (Feature == "+neon" || Feature == "+fp-armv8")
759       FPU |= NeonMode;
760     if (Feature == "+jscvt") {
761       HasJSCVT = true;
762       FPU |= NeonMode;
763     }
764     if (Feature == "+fcma") {
765       HasFCMA = true;
766       FPU |= NeonMode;
767     }
768 
769     if (Feature == "+sve") {
770       FPU |= NeonMode;
771       FPU |= SveMode;
772       HasFullFP16 = true;
773     }
774     if (Feature == "+sve2") {
775       FPU |= NeonMode;
776       FPU |= SveMode;
777       HasFullFP16 = true;
778       HasSVE2 = true;
779     }
780     if (Feature == "+sve2-aes") {
781       FPU |= NeonMode;
782       FPU |= SveMode;
783       HasFullFP16 = true;
784       HasSVE2 = true;
785       HasSVE2AES = true;
786     }
787     if (Feature == "+sve2-sha3") {
788       FPU |= NeonMode;
789       FPU |= SveMode;
790       HasFullFP16 = true;
791       HasSVE2 = true;
792       HasSVE2SHA3 = true;
793     }
794     if (Feature == "+sve2-sm4") {
795       FPU |= NeonMode;
796       FPU |= SveMode;
797       HasFullFP16 = true;
798       HasSVE2 = true;
799       HasSVE2SM4 = true;
800     }
801     if (Feature == "+sve2-bitperm") {
802       FPU |= NeonMode;
803       FPU |= SveMode;
804       HasFullFP16 = true;
805       HasSVE2 = true;
806       HasSVE2BitPerm = true;
807     }
808     if (Feature == "+f32mm") {
809       FPU |= NeonMode;
810       FPU |= SveMode;
811       HasFullFP16 = true;
812       HasMatmulFP32 = true;
813     }
814     if (Feature == "+f64mm") {
815       FPU |= NeonMode;
816       FPU |= SveMode;
817       HasFullFP16 = true;
818       HasMatmulFP64 = true;
819     }
820     if (Feature == "+sme") {
821       HasSME = true;
822       HasBFloat16 = true;
823       HasFullFP16 = true;
824     }
825     if (Feature == "+sme2") {
826       HasSME = true;
827       HasSME2 = true;
828       HasBFloat16 = true;
829       HasFullFP16 = true;
830     }
831     if (Feature == "+sme-f64f64") {
832       HasSME = true;
833       HasSMEF64F64 = true;
834       HasBFloat16 = true;
835       HasFullFP16 = true;
836     }
837     if (Feature == "+sme-i16i64") {
838       HasSME = true;
839       HasSMEI16I64 = true;
840       HasBFloat16 = true;
841       HasFullFP16 = true;
842     }
843     if (Feature == "+sme-fa64") {
844       FPU |= NeonMode;
845       FPU |= SveMode;
846       HasSME = true;
847       HasSVE2 = true;
848       HasSMEFA64 = true;
849     }
850     if (Feature == "+sb")
851       HasSB = true;
852     if (Feature == "+predres")
853       HasPredRes = true;
854     if (Feature == "+ssbs")
855       HasSSBS = true;
856     if (Feature == "+bti")
857       HasBTI = true;
858     if (Feature == "+wfxt")
859       HasWFxT = true;
860     if (Feature == "-fmv")
861       HasFMV = false;
862     if (Feature == "+crc")
863       HasCRC = true;
864     if (Feature == "+rcpc")
865       HasRCPC = true;
866     if (Feature == "+aes") {
867       FPU |= NeonMode;
868       HasAES = true;
869     }
870     if (Feature == "+sha2") {
871       FPU |= NeonMode;
872       HasSHA2 = true;
873     }
874     if (Feature == "+sha3") {
875       FPU |= NeonMode;
876       HasSHA2 = true;
877       HasSHA3 = true;
878     }
879     if (Feature == "+rdm") {
880       FPU |= NeonMode;
881       HasRDM = true;
882     }
883     if (Feature == "+dit")
884       HasDIT = true;
885     if (Feature == "+cccp")
886       HasCCPP = true;
887     if (Feature == "+ccdp") {
888       HasCCPP = true;
889       HasCCDP = true;
890     }
891     if (Feature == "+fptoint")
892       HasFRInt3264 = true;
893     if (Feature == "+sm4") {
894       FPU |= NeonMode;
895       HasSM4 = true;
896     }
897     if (Feature == "+strict-align")
898       HasUnaligned = false;
899     // All predecessor archs are added but select the latest one for ArchKind.
900     if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
901       ArchInfo = &llvm::AArch64::ARMV8A;
902     if (Feature == "+v8.1a" &&
903         ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
904       ArchInfo = &llvm::AArch64::ARMV8_1A;
905     if (Feature == "+v8.2a" &&
906         ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
907       ArchInfo = &llvm::AArch64::ARMV8_2A;
908     if (Feature == "+v8.3a" &&
909         ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
910       ArchInfo = &llvm::AArch64::ARMV8_3A;
911     if (Feature == "+v8.4a" &&
912         ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
913       ArchInfo = &llvm::AArch64::ARMV8_4A;
914     if (Feature == "+v8.5a" &&
915         ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
916       ArchInfo = &llvm::AArch64::ARMV8_5A;
917     if (Feature == "+v8.6a" &&
918         ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
919       ArchInfo = &llvm::AArch64::ARMV8_6A;
920     if (Feature == "+v8.7a" &&
921         ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
922       ArchInfo = &llvm::AArch64::ARMV8_7A;
923     if (Feature == "+v8.8a" &&
924         ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
925       ArchInfo = &llvm::AArch64::ARMV8_8A;
926     if (Feature == "+v8.9a" &&
927         ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
928       ArchInfo = &llvm::AArch64::ARMV8_9A;
929     if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
930       ArchInfo = &llvm::AArch64::ARMV9A;
931     if (Feature == "+v9.1a" &&
932         ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
933       ArchInfo = &llvm::AArch64::ARMV9_1A;
934     if (Feature == "+v9.2a" &&
935         ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
936       ArchInfo = &llvm::AArch64::ARMV9_2A;
937     if (Feature == "+v9.3a" &&
938         ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
939       ArchInfo = &llvm::AArch64::ARMV9_3A;
940     if (Feature == "+v9.4a" &&
941         ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
942       ArchInfo = &llvm::AArch64::ARMV9_4A;
943     if (Feature == "+v9.5a" &&
944         ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
945       ArchInfo = &llvm::AArch64::ARMV9_5A;
946     if (Feature == "+v8r")
947       ArchInfo = &llvm::AArch64::ARMV8R;
948     if (Feature == "+fullfp16") {
949       FPU |= NeonMode;
950       HasFullFP16 = true;
951     }
952     if (Feature == "+dotprod") {
953       FPU |= NeonMode;
954       HasDotProd = true;
955     }
956     if (Feature == "+fp16fml") {
957       FPU |= NeonMode;
958       HasFullFP16 = true;
959       HasFP16FML = true;
960     }
961     if (Feature == "+mte")
962       HasMTE = true;
963     if (Feature == "+tme")
964       HasTME = true;
965     if (Feature == "+pauth")
966       HasPAuth = true;
967     if (Feature == "+i8mm")
968       HasMatMul = true;
969     if (Feature == "+bf16")
970       HasBFloat16 = true;
971     if (Feature == "+lse")
972       HasLSE = true;
973     if (Feature == "+ls64")
974       HasLS64 = true;
975     if (Feature == "+rand")
976       HasRandGen = true;
977     if (Feature == "+flagm")
978       HasFlagM = true;
979     if (Feature == "+altnzcv") {
980       HasFlagM = true;
981       HasAlternativeNZCV = true;
982     }
983     if (Feature == "+mops")
984       HasMOPS = true;
985     if (Feature == "+d128")
986       HasD128 = true;
987     if (Feature == "+gcs")
988       HasGCS = true;
989     if (Feature == "+rcpc3")
990       HasRCPC3 = true;
991   }
992 
993   // Check features that are manually disabled by command line options.
994   // This needs to be checked after architecture-related features are handled,
995   // making sure they are properly disabled when required.
996   for (const auto &Feature : Features) {
997     if (Feature == "-d128")
998       HasD128 = false;
999   }
1000 
1001   setDataLayout();
1002   setArchFeatures();
1003 
1004   if (HasNoFP) {
1005     FPU &= ~FPUMode;
1006     FPU &= ~NeonMode;
1007     FPU &= ~SveMode;
1008   }
1009   if (HasNoNeon) {
1010     FPU &= ~NeonMode;
1011     FPU &= ~SveMode;
1012   }
1013   if (HasNoSVE)
1014     FPU &= ~SveMode;
1015 
1016   return true;
1017 }
1018 
bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
  if (CpuInfo) {
    auto Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
  // add target '+/-'features that can later disable some of features added on
  // the first loop. Function Multi Versioning features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if (((Feature[0] == '?' || Feature[0] == '+')) &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
      // Expand this feature's comma-separated dependency list first, so an
      // explicit '-'feature handled in the second loop can still override it.
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(AttrFeatures, ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        // Translate a user-facing extension name to the backend feature
        // string recorded in the extension table, when the name is known.
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
          llvm::AArch64::parseArchExtension(Feature.substr(1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
1063 
// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
// Duplicate "arch="/"cpu="/"tune=" entries are recorded in Ret.Duplicate so
// Sema can diagnose them later.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  // Splits a "+feat1+feat2..." suffix and appends each piece to Features,
  // translating names via getArchExtFeature where possible.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Features.push_back(FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.starts_with("no"))
          Features.push_back("-" + Feature.drop_front(2).str());
        else
          Features.push_back("+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but has no effect on AArch64.
    if (Feature.starts_with("fpmath="))
      continue;

    if (Feature.starts_with("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.starts_with("arch=")) {
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.starts_with("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.starts_with("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.starts_with("+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.starts_with("no-")) {
      // "no-feat": negate the translated name (drop its leading '+'), or the
      // raw name if the extension is unknown (diagnosed later by Sema).
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(Feature.split("-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
      else
        Ret.Features.push_back("-" + Feature.split("-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(FeatureName.str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  return Ret;
}
1159 
// The __bf16 type is always available on AArch64 targets.
bool AArch64TargetInfo::hasBFloat16Type() const {
  return true;
}
1163 
// Accept the calling conventions AArch64 can lower; any other convention
// produces a warning and falls back to the default.
TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
1181 
1182 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1183 
// The generic AAPCS64 va_list is a struct; Darwin/Windows subclasses override
// this with a char-pointer va_list.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
1187 
// Register names accepted in GCC-style inline assembly.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // clang-format off

    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0",  "pn1",  "pn2",  "pn3",  "pn4",  "pn5",  "pn6",  "pn7",  "pn8",
    "pn9",  "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",

    // SME registers
    "za", "zt0",

    // clang-format on
};
1234 
// Expose the static register-name table above as an ArrayRef.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}
1238 
// Alternate spellings accepted for registers in inline assembly, mapped to
// the canonical names from GCCRegNames above.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1277 
// Expose the static alias table above as an ArrayRef.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}
1281 
// Returns the length of a "@cc<cond>" inline-asm constraint, or 0 when the
// string is not a recognized condition-code constraint. Every accepted
// constraint is exactly five characters: "@cc" plus a two-letter AArch64
// condition code, so the function returns either 5 or 0.
static unsigned matchAsmCCConstraint(const char *Name) {
  // Reject anything that does not start with the literal "@cc" prefix.
  if (Name[0] != '@' || Name[1] != 'c' || Name[2] != 'c')
    return 0;

  // The two-letter condition codes accepted after "@cc".
  static const char CondCodes[][3] = {
      "eq", "ne", "hs", "cs", "cc", "lo", "mi", "pl",
      "vs", "vc", "hi", "ls", "ge", "lt", "gt", "le"};

  const char *Cond = Name + 3;
  for (const char *Code : CondCodes)
    // Match both letters and require the string to end right after them.
    if (Cond[0] == Code[0] && Cond[1] == Code[1] && Cond[2] == '\0')
      return 5;
  return 0;
}
1305 
// Convert an inline-asm constraint to its LLVM IR form, advancing the
// Constraint pointer past any extra characters consumed (the caller advances
// it by one more, hence the "Len - 1" adjustments below).
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    // "@cc<cond>" becomes "{@cc<cond>}" so LLVM treats it as one token.
    if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    // Unrecognized '@' constraint: pass the single character through.
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1327 
// Validate a GCC-style inline-asm constraint character (or multi-character
// sequence starting at Name), recording in Info whether it allows a register
// or memory operand. Returns false for unrecognized constraints.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    // Multi-character 'U...' constraints; Name is advanced past the extra
    // characters when one is recognized.
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
      // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
    // Unrecognized '@' sequence: fall out of the switch and reject below.
  }
  return false;
}
1392 
// Check whether an operand modifier is compatible with a constraint and an
// operand size; on rejection, SuggestedModifier tells the user what to write.
bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  Constraint = Constraint.ltrim("=+&");

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      // 512-bit operands are only valid with the LS64 extension
      // (ld64b/st64b operate on a group of eight x-registers).
      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
1425 
1426 std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1427 
1428 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1429   if (RegNo == 0)
1430     return 0;
1431   if (RegNo == 1)
1432     return 1;
1433   return -1;
1434 }
1435 
1436 bool AArch64TargetInfo::hasInt128Type() const { return true; }
1437 
// Little-endian AArch64 target; all settings come from the common base.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1441 
1442 void AArch64leTargetInfo::setDataLayout() {
1443   if (getTriple().isOSBinFormatMachO()) {
1444     if(getTriple().isArch32Bit())
1445       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
1446     else
1447       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
1448   } else
1449     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1450 }
1451 
// Define the little-endian marker macro, then the common AArch64 macros.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1457 
// Big-endian AArch64 target; all settings come from the common base.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1461 
// Define the big-endian marker macros, then the common AArch64 macros.
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1469 
void AArch64beTargetInfo::setDataLayout() {
  // Mach-O has no big-endian AArch64 variant.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
1474 
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // Windows 'long double' is plain IEEE double, unlike AAPCS64's fp128.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // With 32-bit 'long', the 64-bit integer-derived types must be based on
  // 'long long' instead.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1492 
1493 void WindowsARM64TargetInfo::setDataLayout() {
1494   resetDataLayout(Triple.isOSBinFormatMachO()
1495                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
1496                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1497                   Triple.isOSBinFormatMachO() ? "_" : "");
1498 }
1499 
// Windows on ARM64 uses a simple char* va_list rather than the AAPCS64 struct.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1504 
// Same acceptance set as the base target, except that x86-specific Microsoft
// conventions are silently ignored rather than warned about.
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
1525 
// MSVC-environment ARM64 target: uses the Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1531 
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    // Arm64EC presents itself as x64 to existing code, plus its own marker.
    Builder.defineMacro("_M_X64", "100");
    Builder.defineMacro("_M_AMD64", "100");
    Builder.defineMacro("_M_ARM64EC", "1");
  } else {
    Builder.defineMacro("_M_ARM64", "1");
  }
}
1543 
// Always the Microsoft Win64 convention, regardless of the Clang ABI
// compatibility setting.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
1548 
1549 unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
1550   unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
1551 
1552   // MSVC does size based alignment for arm64 based on alignment section in
1553   // below document, replicate that to keep alignment consistent with object
1554   // files compiled by MSVC.
1555   // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
1556   if (TypeSize >= 512) {              // TypeSize >= 64 bytes
1557     Align = std::max(Align, 128u);    // align type at least 16 bytes
1558   } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
1559     Align = std::max(Align, 64u);     // align type at least 8 butes
1560   } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
1561     Align = std::max(Align, 32u);     // align type at least 4 bytes
1562   }
1563   return Align;
1564 }
1565 
// MinGW-environment ARM64 target: uses the generic (Itanium-style) AArch64
// C++ ABI instead of the Microsoft one.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1571 
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // arm64_32 (watchOS) keeps a 64-bit intmax_t despite its 32-bit pointers.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // Darwin 'long double' is plain IEEE double, unlike AAPCS64's fp128.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 keeps the legacy ARM bitfield layout rules and the WatchOS
    // C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1595 
// Define the Darwin-specific arm64 macros, then the common Darwin macros.
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1614 
// Darwin arm64 uses a simple char* va_list rather than the AAPCS64 struct.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1619 
// 64-bit RenderScript is aarch64: rewrite the triple's architecture to
// "aarch64", keeping the original vendor, OS and environment.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
1629 
// Define the RenderScript marker macro, then the standard AArch64 macros.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1635