xref: /freebsd/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision 56727255ad47072ec2cc81b4ae728a099697b0e4)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
#include "AArch64.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/ARMTargetParserCommon.h"
#include <cctype>
#include <optional>
23 
24 using namespace clang;
25 using namespace clang::targets;
26 
// Table of every AArch64 target builtin, in the order the builtin IDs are
// assigned (NEON, then SVE, then SME, then the remaining AArch64 builtins).
// Each .def file #undefs the macros after use, so they are redefined before
// every #include below.
static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSME.def"

// BuiltinsAArch64.def additionally uses language-restricted and
// header-dependent builtin flavours, so it gets two extra macros.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
56 
57 void AArch64TargetInfo::setArchFeatures() {
58   if (*ArchInfo == llvm::AArch64::ARMV8R) {
59     HasDotProd = true;
60     HasDIT = true;
61     HasFlagM = true;
62     HasRCPC = true;
63     FPU |= NeonMode;
64     HasCCPP = true;
65     HasCRC = true;
66     HasLSE = true;
67     HasRDM = true;
68   } else if (ArchInfo->Version.getMajor() == 8) {
69     if (ArchInfo->Version.getMinor() >= 7u) {
70       HasWFxT = true;
71     }
72     if (ArchInfo->Version.getMinor() >= 6u) {
73       HasBFloat16 = true;
74       HasMatMul = true;
75     }
76     if (ArchInfo->Version.getMinor() >= 5u) {
77       HasAlternativeNZCV = true;
78       HasFRInt3264 = true;
79       HasSSBS = true;
80       HasSB = true;
81       HasPredRes = true;
82       HasBTI = true;
83     }
84     if (ArchInfo->Version.getMinor() >= 4u) {
85       HasDotProd = true;
86       HasDIT = true;
87       HasFlagM = true;
88     }
89     if (ArchInfo->Version.getMinor() >= 3u) {
90       HasRCPC = true;
91       FPU |= NeonMode;
92     }
93     if (ArchInfo->Version.getMinor() >= 2u) {
94       HasCCPP = true;
95     }
96     if (ArchInfo->Version.getMinor() >= 1u) {
97       HasCRC = true;
98       HasLSE = true;
99       HasRDM = true;
100     }
101   } else if (ArchInfo->Version.getMajor() == 9) {
102     if (ArchInfo->Version.getMinor() >= 2u) {
103       HasWFxT = true;
104     }
105     if (ArchInfo->Version.getMinor() >= 1u) {
106       HasBFloat16 = true;
107       HasMatMul = true;
108     }
109     FPU |= SveMode;
110     HasSVE2 = true;
111     HasFullFP16 = true;
112     HasAlternativeNZCV = true;
113     HasFRInt3264 = true;
114     HasSSBS = true;
115     HasSB = true;
116     HasPredRes = true;
117     HasBTI = true;
118     HasDotProd = true;
119     HasDIT = true;
120     HasFlagM = true;
121     HasRCPC = true;
122     FPU |= NeonMode;
123     HasCCPP = true;
124     HasCRC = true;
125     HasLSE = true;
126     HasRDM = true;
127   }
128 }
129 
// Configure type widths, formats and ABI defaults common to all AArch64
// targets; OS-specific subclasses refine these further.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD maps int64_t/intmax_t to long long; everyone else uses long.
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    // wchar_t is unsigned except on Darwin and NetBSD.
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HalfArgsAndReturns = true;
  HasFloat16 = true;
  HasStrictFP = true;

  // 32-bit pointers/longs cover the ILP32 variants of the triple
  // (presumably arm64_32-style targets — confirm against callers).
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // AAPCS64 long double is IEEE binary128.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // The \01 prefix on the profiling symbol presumably suppresses the usual
  // symbol-name mangling — confirm against the backend's handling.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
198 
199 StringRef AArch64TargetInfo::getABI() const { return ABI; }
200 
201 bool AArch64TargetInfo::setABI(const std::string &Name) {
202   if (Name != "aapcs" && Name != "darwinpcs")
203     return false;
204 
205   ABI = Name;
206   return true;
207 }
208 
209 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
210                                                  BranchProtectionInfo &BPI,
211                                                  StringRef &Err) const {
212   llvm::ARM::ParsedBranchProtection PBP;
213   if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
214     return false;
215 
216   BPI.SignReturnAddr =
217       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
218           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
219           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
220           .Default(LangOptions::SignReturnAddressScopeKind::None);
221 
222   if (PBP.Key == "a_key")
223     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
224   else
225     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
226 
227   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
228   BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
229   BPI.GuardedControlStack = PBP.GuardedControlStack;
230   return true;
231 }
232 
233 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
234   return Name == "generic" || llvm::AArch64::parseCpu(Name);
235 }
236 
// Accept the CPU iff it passes isValidCPUName; no state is recorded here.
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
240 
// Populate Values with the names accepted for -mcpu. Note this delegates to
// fillValidCPUArchList, which presumably includes architecture names as
// well as CPU names — confirm against the target parser.
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
245 
// Armv8.1-A: advertise the QRDMX extension via its ACLE macro.
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}
250 
// Armv8.2-A adds no unconditional macros of its own here.
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
256 
// Armv8.3-A: complex-number arithmetic, JavaScript float conversion (FJCVTZS)
// and pointer authentication become mandatory.
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}
265 
// Armv8.4-A adds no unconditional macros of its own here.
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
271 
// Armv8.5-A: FRINT32/FRINT64 rounding instructions and branch target
// identification become mandatory.
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  Builder.defineMacro("__ARM_FEATURE_BTI", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
279 
// Armv8.6-A: currently only inherits the Armv8.5-A set; see FIXME below.
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
289 
// Armv8.7-A adds no unconditional macros of its own here.
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
295 
// Armv8.8-A adds no unconditional macros of its own here.
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}
301 
// Armv8.9-A adds no unconditional macros of its own here.
void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}
307 
// Armv9-A's macro set is that of its v8.x contemporary (see mapping below).
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}
313 
// Armv9.1-A's macro set is that of its v8.x contemporary.
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}
319 
// Armv9.2-A's macro set is that of its v8.x contemporary.
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}
325 
// Armv9.3-A's macro set is that of its v8.x contemporary.
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
331 
// Armv9.4-A's macro set is that of its v8.x contemporary.
void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}
337 
void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
  getTargetDefinesARMV94A(Opts, Builder);
}
343 
344 void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
345                                          MacroBuilder &Builder) const {
346   // Target identification.
347   if (getTriple().isWindowsArm64EC()) {
348     // Define the same set of macros as would be defined on x86_64 to ensure that
349     // ARM64EC datatype layouts match those of x86_64 compiled code
350     Builder.defineMacro("__amd64__");
351     Builder.defineMacro("__amd64");
352     Builder.defineMacro("__x86_64");
353     Builder.defineMacro("__x86_64__");
354     Builder.defineMacro("__arm64ec__");
355   } else {
356     Builder.defineMacro("__aarch64__");
357   }
358 
359   // Inline assembly supports AArch64 flag outputs.
360   Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
361 
362   std::string CodeModel = getTargetOpts().CodeModel;
363   if (CodeModel == "default")
364     CodeModel = "small";
365   for (char &c : CodeModel)
366     c = toupper(c);
367   Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
368 
369   // ACLE predefines. Many can only have one possible value on v8 AArch64.
370   Builder.defineMacro("__ARM_ACLE", "200");
371   Builder.defineMacro("__ARM_ARCH",
372                       std::to_string(ArchInfo->Version.getMajor()));
373   Builder.defineMacro("__ARM_ARCH_PROFILE",
374                       std::string("'") + (char)ArchInfo->Profile + "'");
375 
376   Builder.defineMacro("__ARM_64BIT_STATE", "1");
377   Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
378   Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
379 
380   Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
381   Builder.defineMacro("__ARM_FEATURE_FMA", "1");
382   Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
383   Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
384   Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
385   Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
386   Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
387 
388   Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
389 
390   // These macros are set when Clang can parse declarations with these
391   // attributes.
392   Builder.defineMacro("__ARM_STATE_ZA", "1");
393   Builder.defineMacro("__ARM_STATE_ZT0", "1");
394 
395   // 0xe implies support for half, single and double precision operations.
396   if (FPU & FPUMode)
397     Builder.defineMacro("__ARM_FP", "0xE");
398 
399   // PCS specifies this for SysV variants, which is all we support. Other ABIs
400   // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
401   Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
402   Builder.defineMacro("__ARM_FP16_ARGS", "1");
403 
404   if (Opts.UnsafeFPMath)
405     Builder.defineMacro("__ARM_FP_FAST", "1");
406 
407   Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
408                       Twine(Opts.WCharSize ? Opts.WCharSize : 4));
409 
410   Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
411 
412   if (FPU & NeonMode) {
413     Builder.defineMacro("__ARM_NEON", "1");
414     // 64-bit NEON supports half, single and double precision operations.
415     Builder.defineMacro("__ARM_NEON_FP", "0xE");
416   }
417 
418   if (FPU & SveMode)
419     Builder.defineMacro("__ARM_FEATURE_SVE", "1");
420 
421   if ((FPU & NeonMode) && (FPU & SveMode))
422     Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
423 
424   if (HasSVE2)
425     Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
426 
427   if (HasSVE2 && HasSVE2AES)
428     Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
429 
430   if (HasSVE2 && HasSVE2BitPerm)
431     Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
432 
433   if (HasSVE2 && HasSVE2SHA3)
434     Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
435 
436   if (HasSVE2 && HasSVE2SM4)
437     Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
438 
439   if (HasSME) {
440     Builder.defineMacro("__ARM_FEATURE_SME");
441     Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
442   }
443 
444   if (HasSME2) {
445     Builder.defineMacro("__ARM_FEATURE_SME");
446     Builder.defineMacro("__ARM_FEATURE_SME2");
447     Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
448   }
449 
450   if (HasCRC)
451     Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
452 
453   if (HasRCPC3)
454     Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
455   else if (HasRCPC)
456     Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
457 
458   if (HasFMV)
459     Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
460 
461   // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
462   // macros for AES, SHA2, SHA3 and SM4
463   if (HasAES && HasSHA2)
464     Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
465 
466   if (HasAES)
467     Builder.defineMacro("__ARM_FEATURE_AES", "1");
468 
469   if (HasSHA2)
470     Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
471 
472   if (HasSHA3) {
473     Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
474     Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
475   }
476 
477   if (HasSM4) {
478     Builder.defineMacro("__ARM_FEATURE_SM3", "1");
479     Builder.defineMacro("__ARM_FEATURE_SM4", "1");
480   }
481 
482   if (HasPAuth)
483     Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
484 
485   if (HasUnaligned)
486     Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
487 
488   if ((FPU & NeonMode) && HasFullFP16)
489     Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
490   if (HasFullFP16)
491    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
492 
493   if (HasDotProd)
494     Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
495 
496   if (HasMTE)
497     Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
498 
499   if (HasTME)
500     Builder.defineMacro("__ARM_FEATURE_TME", "1");
501 
502   if (HasMatMul)
503     Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
504 
505   if (HasLSE)
506     Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
507 
508   if (HasBFloat16) {
509     Builder.defineMacro("__ARM_FEATURE_BF16", "1");
510     Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
511     Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
512     Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
513   }
514 
515   if ((FPU & SveMode) && HasBFloat16) {
516     Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
517   }
518 
519   if ((FPU & SveMode) && HasMatmulFP64)
520     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
521 
522   if ((FPU & SveMode) && HasMatmulFP32)
523     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
524 
525   if ((FPU & SveMode) && HasMatMul)
526     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
527 
528   if ((FPU & NeonMode) && HasFP16FML)
529     Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
530 
531   if (Opts.hasSignReturnAddress()) {
532     // Bitmask:
533     // 0: Protection using the A key
534     // 1: Protection using the B key
535     // 2: Protection including leaf functions
536     unsigned Value = 0;
537 
538     if (Opts.isSignReturnAddressWithAKey())
539       Value |= (1 << 0);
540     else
541       Value |= (1 << 1);
542 
543     if (Opts.isSignReturnAddressScopeAll())
544       Value |= (1 << 2);
545 
546     Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
547   }
548 
549   if (Opts.BranchTargetEnforcement)
550     Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
551 
552   if (Opts.GuardedControlStack)
553     Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");
554 
555   if (HasLS64)
556     Builder.defineMacro("__ARM_FEATURE_LS64", "1");
557 
558   if (HasRandGen)
559     Builder.defineMacro("__ARM_FEATURE_RNG", "1");
560 
561   if (HasMOPS)
562     Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
563 
564   if (HasD128)
565     Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
566 
567   if (HasGCS)
568     Builder.defineMacro("__ARM_FEATURE_GCS", "1");
569 
570   if (*ArchInfo == llvm::AArch64::ARMV8_1A)
571     getTargetDefinesARMV81A(Opts, Builder);
572   else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
573     getTargetDefinesARMV82A(Opts, Builder);
574   else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
575     getTargetDefinesARMV83A(Opts, Builder);
576   else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
577     getTargetDefinesARMV84A(Opts, Builder);
578   else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
579     getTargetDefinesARMV85A(Opts, Builder);
580   else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
581     getTargetDefinesARMV86A(Opts, Builder);
582   else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
583     getTargetDefinesARMV87A(Opts, Builder);
584   else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
585     getTargetDefinesARMV88A(Opts, Builder);
586   else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
587     getTargetDefinesARMV89A(Opts, Builder);
588   else if (*ArchInfo == llvm::AArch64::ARMV9A)
589     getTargetDefinesARMV9A(Opts, Builder);
590   else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
591     getTargetDefinesARMV91A(Opts, Builder);
592   else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
593     getTargetDefinesARMV92A(Opts, Builder);
594   else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
595     getTargetDefinesARMV93A(Opts, Builder);
596   else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
597     getTargetDefinesARMV94A(Opts, Builder);
598   else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
599     getTargetDefinesARMV95A(Opts, Builder);
600 
601   // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
602   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
603   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
604   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
605   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
606   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
607 
608   // Allow detection of fast FMA support.
609   Builder.defineMacro("__FP_FAST_FMA", "1");
610   Builder.defineMacro("__FP_FAST_FMAF", "1");
611 
612   // C/C++ operators work on both VLS and VLA SVE types
613   if (FPU & SveMode)
614     Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
615 
616   if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
617     Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
618   }
619 }
620 
// Expose the BuiltinInfo table above; its length is the number of builtin
// IDs in [Builtin::FirstTSBuiltin, clang::AArch64::LastTSBuiltin).
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                         Builtin::FirstTSBuiltin);
}
625 
626 std::optional<std::pair<unsigned, unsigned>>
627 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
628   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
629     return std::pair<unsigned, unsigned>(
630         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
631 
632   if (hasFeature("sve"))
633     return std::pair<unsigned, unsigned>(1, 16);
634 
635   return std::nullopt;
636 }
637 
638 unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
639   if (Name == "default")
640     return 0;
641   if (auto Ext = llvm::AArch64::parseArchExtension(Name))
642     return Ext->FmvPriority;
643   return 0;
644 }
645 
// Cost weight used when comparing multi-version candidates by feature count.
unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
  // Take the maximum priority as per feature cost, so more features win.
  return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
}
650 
651 bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
652   if (auto Ext = llvm::AArch64::parseArchExtension(Name))
653     return !Ext->DependentFeatures.empty();
654   return false;
655 }
656 
657 StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
658   if (auto Ext = llvm::AArch64::parseArchExtension(Name))
659     return Ext->DependentFeatures;
660   return StringRef();
661 }
662 
// __builtin_cpu_supports accepts exactly the extension names the AArch64
// target parser recognizes.
bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
  return llvm::AArch64::parseArchExtension(FeatureStr).has_value();
}
666 
// Map a feature name (as used by __has_feature / target attributes) onto the
// corresponding internal flag. SVE sub-features additionally require SveMode
// to be active in FPU; unknown names report false.
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("fmv", HasFMV)
      .Cases("neon", "fp", "simd", FPU & NeonMode)
      .Case("jscvt", HasJSCVT)
      .Case("fcma", HasFCMA)
      .Case("rng", HasRandGen)
      .Case("flagm", HasFlagM)
      .Case("flagm2", HasAlternativeNZCV)
      .Case("fp16fml", HasFP16FML)
      .Case("dotprod", HasDotProd)
      .Case("sm4", HasSM4)
      .Case("rdm", HasRDM)
      .Case("lse", HasLSE)
      .Case("crc", HasCRC)
      .Case("sha2", HasSHA2)
      .Case("sha3", HasSHA3)
      .Cases("aes", "pmull", HasAES)
      .Cases("fp16", "fullfp16", HasFullFP16)
      .Case("dit", HasDIT)
      .Case("dpb", HasCCPP)
      .Case("dpb2", HasCCDP)
      .Case("rcpc", HasRCPC)
      .Case("frintts", HasFRInt3264)
      .Case("i8mm", HasMatMul)
      .Case("bf16", HasBFloat16)
      .Case("sve", FPU & SveMode)
      .Case("sve-bf16", FPU & SveMode && HasBFloat16)
      .Case("sve-i8mm", FPU & SveMode && HasMatMul)
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
      .Case("sve2", FPU & SveMode && HasSVE2)
      .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
      .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
      .Case("sme", HasSME)
      .Case("sme2", HasSME2)
      .Case("sme-f64f64", HasSMEF64F64)
      .Case("sme-i16i64", HasSMEI16I64)
      .Case("sme-fa64", HasSMEFA64)
      .Cases("memtag", "memtag2", HasMTE)
      .Case("sb", HasSB)
      .Case("predres", HasPredRes)
      .Cases("ssbs", "ssbs2", HasSSBS)
      .Case("bti", HasBTI)
      .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
      .Case("wfxt", HasWFxT)
      .Case("rcpc3", HasRCPC3)
      .Default(false);
}
719 
// Record a feature toggle in the Features map. Enabling an architecture
// pseudo-feature (e.g. "v8.2a") additionally enables every architecture it
// implies and the extensions that architecture mandates.
void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
                                          StringRef Name, bool Enabled) const {
  Features[Name] = Enabled;
  // If the feature is an architecture feature (like v8.2a), add all previous
  // architecture versions and any dependant target features.
  const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
      llvm::AArch64::ArchInfo::findBySubArch(Name);

  if (!ArchInfo)
    return; // Not an architecture, nothing more to do.

  // Disabling an architecture feature does not affect dependent features
  if (!Enabled)
    return;

  // Enable every older architecture this one implies.
  for (const auto *OtherArch : llvm::AArch64::ArchInfos)
    if (ArchInfo->implies(*OtherArch))
      Features[OtherArch->getSubArch()] = true;

  // Set any features implied by the architecture
  std::vector<StringRef> CPUFeats;
  if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
    for (auto F : CPUFeats) {
      // getExtensionFeatures yields "+feature" spellings; strip the '+'.
      assert(F[0] == '+' && "Expected + in target feature!");
      Features[F.drop_front(1)] = true;
    }
  }
}
748 
749 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
750                                              DiagnosticsEngine &Diags) {
751   for (const auto &Feature : Features) {
752     if (Feature == "-fp-armv8")
753       HasNoFP = true;
754     if (Feature == "-neon")
755       HasNoNeon = true;
756     if (Feature == "-sve")
757       HasNoSVE = true;
758 
759     if (Feature == "+neon" || Feature == "+fp-armv8")
760       FPU |= NeonMode;
761     if (Feature == "+jscvt") {
762       HasJSCVT = true;
763       FPU |= NeonMode;
764     }
765     if (Feature == "+fcma") {
766       HasFCMA = true;
767       FPU |= NeonMode;
768     }
769 
770     if (Feature == "+sve") {
771       FPU |= NeonMode;
772       FPU |= SveMode;
773       HasFullFP16 = true;
774     }
775     if (Feature == "+sve2") {
776       FPU |= NeonMode;
777       FPU |= SveMode;
778       HasFullFP16 = true;
779       HasSVE2 = true;
780     }
781     if (Feature == "+sve2-aes") {
782       FPU |= NeonMode;
783       FPU |= SveMode;
784       HasFullFP16 = true;
785       HasSVE2 = true;
786       HasSVE2AES = true;
787     }
788     if (Feature == "+sve2-sha3") {
789       FPU |= NeonMode;
790       FPU |= SveMode;
791       HasFullFP16 = true;
792       HasSVE2 = true;
793       HasSVE2SHA3 = true;
794     }
795     if (Feature == "+sve2-sm4") {
796       FPU |= NeonMode;
797       FPU |= SveMode;
798       HasFullFP16 = true;
799       HasSVE2 = true;
800       HasSVE2SM4 = true;
801     }
802     if (Feature == "+sve2-bitperm") {
803       FPU |= NeonMode;
804       FPU |= SveMode;
805       HasFullFP16 = true;
806       HasSVE2 = true;
807       HasSVE2BitPerm = true;
808     }
809     if (Feature == "+f32mm") {
810       FPU |= NeonMode;
811       FPU |= SveMode;
812       HasFullFP16 = true;
813       HasMatmulFP32 = true;
814     }
815     if (Feature == "+f64mm") {
816       FPU |= NeonMode;
817       FPU |= SveMode;
818       HasFullFP16 = true;
819       HasMatmulFP64 = true;
820     }
821     if (Feature == "+sme") {
822       HasSME = true;
823       HasBFloat16 = true;
824       HasFullFP16 = true;
825     }
826     if (Feature == "+sme2") {
827       HasSME = true;
828       HasSME2 = true;
829       HasBFloat16 = true;
830       HasFullFP16 = true;
831     }
832     if (Feature == "+sme-f64f64") {
833       HasSME = true;
834       HasSMEF64F64 = true;
835       HasBFloat16 = true;
836       HasFullFP16 = true;
837     }
838     if (Feature == "+sme-i16i64") {
839       HasSME = true;
840       HasSMEI16I64 = true;
841       HasBFloat16 = true;
842       HasFullFP16 = true;
843     }
844     if (Feature == "+sme-fa64") {
845       FPU |= NeonMode;
846       FPU |= SveMode;
847       HasSME = true;
848       HasSVE2 = true;
849       HasSMEFA64 = true;
850     }
851     if (Feature == "+sb")
852       HasSB = true;
853     if (Feature == "+predres")
854       HasPredRes = true;
855     if (Feature == "+ssbs")
856       HasSSBS = true;
857     if (Feature == "+bti")
858       HasBTI = true;
859     if (Feature == "+wfxt")
860       HasWFxT = true;
861     if (Feature == "-fmv")
862       HasFMV = false;
863     if (Feature == "+crc")
864       HasCRC = true;
865     if (Feature == "+rcpc")
866       HasRCPC = true;
867     if (Feature == "+aes") {
868       FPU |= NeonMode;
869       HasAES = true;
870     }
871     if (Feature == "+sha2") {
872       FPU |= NeonMode;
873       HasSHA2 = true;
874     }
875     if (Feature == "+sha3") {
876       FPU |= NeonMode;
877       HasSHA2 = true;
878       HasSHA3 = true;
879     }
880     if (Feature == "+rdm") {
881       FPU |= NeonMode;
882       HasRDM = true;
883     }
884     if (Feature == "+dit")
885       HasDIT = true;
886     if (Feature == "+cccp")
887       HasCCPP = true;
888     if (Feature == "+ccdp") {
889       HasCCPP = true;
890       HasCCDP = true;
891     }
892     if (Feature == "+fptoint")
893       HasFRInt3264 = true;
894     if (Feature == "+sm4") {
895       FPU |= NeonMode;
896       HasSM4 = true;
897     }
898     if (Feature == "+strict-align")
899       HasUnaligned = false;
900     // All predecessor archs are added but select the latest one for ArchKind.
901     if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
902       ArchInfo = &llvm::AArch64::ARMV8A;
903     if (Feature == "+v8.1a" &&
904         ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
905       ArchInfo = &llvm::AArch64::ARMV8_1A;
906     if (Feature == "+v8.2a" &&
907         ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
908       ArchInfo = &llvm::AArch64::ARMV8_2A;
909     if (Feature == "+v8.3a" &&
910         ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
911       ArchInfo = &llvm::AArch64::ARMV8_3A;
912     if (Feature == "+v8.4a" &&
913         ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
914       ArchInfo = &llvm::AArch64::ARMV8_4A;
915     if (Feature == "+v8.5a" &&
916         ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
917       ArchInfo = &llvm::AArch64::ARMV8_5A;
918     if (Feature == "+v8.6a" &&
919         ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
920       ArchInfo = &llvm::AArch64::ARMV8_6A;
921     if (Feature == "+v8.7a" &&
922         ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
923       ArchInfo = &llvm::AArch64::ARMV8_7A;
924     if (Feature == "+v8.8a" &&
925         ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
926       ArchInfo = &llvm::AArch64::ARMV8_8A;
927     if (Feature == "+v8.9a" &&
928         ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
929       ArchInfo = &llvm::AArch64::ARMV8_9A;
930     if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
931       ArchInfo = &llvm::AArch64::ARMV9A;
932     if (Feature == "+v9.1a" &&
933         ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
934       ArchInfo = &llvm::AArch64::ARMV9_1A;
935     if (Feature == "+v9.2a" &&
936         ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
937       ArchInfo = &llvm::AArch64::ARMV9_2A;
938     if (Feature == "+v9.3a" &&
939         ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
940       ArchInfo = &llvm::AArch64::ARMV9_3A;
941     if (Feature == "+v9.4a" &&
942         ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
943       ArchInfo = &llvm::AArch64::ARMV9_4A;
944     if (Feature == "+v9.5a" &&
945         ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
946       ArchInfo = &llvm::AArch64::ARMV9_5A;
947     if (Feature == "+v8r")
948       ArchInfo = &llvm::AArch64::ARMV8R;
949     if (Feature == "+fullfp16") {
950       FPU |= NeonMode;
951       HasFullFP16 = true;
952     }
953     if (Feature == "+dotprod") {
954       FPU |= NeonMode;
955       HasDotProd = true;
956     }
957     if (Feature == "+fp16fml") {
958       FPU |= NeonMode;
959       HasFullFP16 = true;
960       HasFP16FML = true;
961     }
962     if (Feature == "+mte")
963       HasMTE = true;
964     if (Feature == "+tme")
965       HasTME = true;
966     if (Feature == "+pauth")
967       HasPAuth = true;
968     if (Feature == "+i8mm")
969       HasMatMul = true;
970     if (Feature == "+bf16")
971       HasBFloat16 = true;
972     if (Feature == "+lse")
973       HasLSE = true;
974     if (Feature == "+ls64")
975       HasLS64 = true;
976     if (Feature == "+rand")
977       HasRandGen = true;
978     if (Feature == "+flagm")
979       HasFlagM = true;
980     if (Feature == "+altnzcv") {
981       HasFlagM = true;
982       HasAlternativeNZCV = true;
983     }
984     if (Feature == "+mops")
985       HasMOPS = true;
986     if (Feature == "+d128")
987       HasD128 = true;
988     if (Feature == "+gcs")
989       HasGCS = true;
990     if (Feature == "+rcpc3")
991       HasRCPC3 = true;
992   }
993 
994   // Check features that are manually disabled by command line options.
995   // This needs to be checked after architecture-related features are handled,
996   // making sure they are properly disabled when required.
997   for (const auto &Feature : Features) {
998     if (Feature == "-d128")
999       HasD128 = false;
1000   }
1001 
1002   setDataLayout();
1003   setArchFeatures();
1004 
1005   if (HasNoFP) {
1006     FPU &= ~FPUMode;
1007     FPU &= ~NeonMode;
1008     FPU &= ~SveMode;
1009   }
1010   if (HasNoNeon) {
1011     FPU &= ~NeonMode;
1012     FPU &= ~SveMode;
1013   }
1014   if (HasNoSVE)
1015     FPU &= ~SveMode;
1016 
1017   return true;
1018 }
1019 
bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  // Builds the feature map by expanding CPU-implied features and explicit
  // target features into UpdatedFeaturesVec, then deferring to the base
  // TargetInfo implementation. Order is significant: later entries in the
  // vector override earlier ones.
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
  if (CpuInfo) {
    auto Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
  // add target '+/-'features that can later disable some of features added on
  // the first loop. Function Multi Versioning features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if (((Feature[0] == '?' || Feature[0] == '+')) &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
      // Expand the comma-separated dependency list for this feature.
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(AttrFeatures, ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      // Canonicalize "+ext" spellings to the backend feature name when the
      // extension is recognized; unknown spellings pass through unchanged.
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
          llvm::AArch64::parseArchExtension(Feature.substr(1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
1064 
// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  // Splits a '+'-separated extension list (e.g. "sve2+bf16") and appends the
  // corresponding backend feature names to Features. Unrecognized names are
  // kept in "+name"/"-name" form so Sema can diagnose them later.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Features.push_back(FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.starts_with("no"))
          Features.push_back("-" + Feature.drop_front(2).str());
        else
          Features.push_back("+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but ignored here.
    if (Feature.starts_with("fpmath="))
      continue;

    if (Feature.starts_with("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.starts_with("arch=")) {
      // Only one "arch=" is allowed; record duplicates for diagnostics.
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.starts_with("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.starts_with("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.starts_with("+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.starts_with("no-")) {
      // "no-<name>": map to the backend feature and negate it; fall back to
      // the raw name so it can be rejected later if unknown.
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(Feature.split("-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
      else
        Ret.Features.push_back("-" + Feature.split("-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(FeatureName.str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  return Ret;
}
1160 
1161 bool AArch64TargetInfo::hasBFloat16Type() const {
1162   return true;
1163 }
1164 
1165 TargetInfo::CallingConvCheckResult
1166 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1167   switch (CC) {
1168   case CC_C:
1169   case CC_Swift:
1170   case CC_SwiftAsync:
1171   case CC_PreserveMost:
1172   case CC_PreserveAll:
1173   case CC_OpenCLKernel:
1174   case CC_AArch64VectorCall:
1175   case CC_AArch64SVEPCS:
1176   case CC_Win64:
1177     return CCCR_OK;
1178   default:
1179     return CCCR_Warning;
1180   }
1181 }
1182 
1183 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1184 
1185 TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
1186   return TargetInfo::AArch64ABIBuiltinVaList;
1187 }
1188 
// Register names accepted in GCC-style inline assembly.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // clang-format off

    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0",  "pn1",  "pn2",  "pn3",  "pn4",  "pn5",  "pn6",  "pn7",  "pn8",
    "pn9",  "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",

    // SME registers
    "za", "zt0",

    // clang-format on
};
1235 
1236 ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
1237   return llvm::ArrayRef(GCCRegNames);
1238 }
1239 
// Accepted aliases for the canonical register names in GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1278 
1279 ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
1280   return llvm::ArrayRef(GCCRegAliases);
1281 }
1282 
// Returns the length of cc constraint.
static unsigned matchAsmCCConstraint(const char *Name) {
  // Every recognized form is "@cc" followed by a two-letter AArch64
  // condition code: exactly five characters, matched against the whole
  // null-terminated string (no prefix matching).
  constexpr unsigned len = 5;
  const std::string_view S(Name);
  if (S.size() != len || S.substr(0, 3) != "@cc")
    return 0;
  static constexpr std::string_view CondCodes[] = {
      "eq", "ne", "hs", "cs", "cc", "lo", "mi", "pl",
      "vs", "vc", "hi", "ls", "ge", "lt", "gt", "le"};
  const std::string_view Cond = S.substr(3);
  for (std::string_view CC : CondCodes)
    if (Cond == CC)
      return len;
  return 0;
}
1306 
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  // Rewrites a target constraint into the canonical form expected downstream,
  // advancing Constraint past any extra characters consumed (the caller is
  // expected to advance one character itself).
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    // Condition-code constraint, e.g. "@cceq": wrap in braces so it is
    // treated like a named register downstream.
    if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    // Unrecognized '@' sequence: pass the single character through.
    return std::string(1, *Constraint);
  default:
    // Defer to the generic TargetInfo handling.
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1328 
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  // Validates one inline-asm constraint, recording in Info whether it allows
  // a register and/or memory operand. Multi-character constraints advance
  // Name past the extra characters they consume.
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
      // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
    // No condition code matched: deliberately fall out of the switch and
    // report failure below.
  }
  return false;
}
1393 
1394 bool AArch64TargetInfo::validateConstraintModifier(
1395     StringRef Constraint, char Modifier, unsigned Size,
1396     std::string &SuggestedModifier) const {
1397   // Strip off constraint modifiers.
1398   Constraint = Constraint.ltrim("=+&");
1399 
1400   switch (Constraint[0]) {
1401   default:
1402     return true;
1403   case 'z':
1404   case 'r': {
1405     switch (Modifier) {
1406     case 'x':
1407     case 'w':
1408       // For now assume that the person knows what they're
1409       // doing with the modifier.
1410       return true;
1411     default:
1412       // By default an 'r' constraint will be in the 'x'
1413       // registers.
1414       if (Size == 64)
1415         return true;
1416 
1417       if (Size == 512)
1418         return HasLS64;
1419 
1420       SuggestedModifier = "w";
1421       return false;
1422     }
1423   }
1424   }
1425 }
1426 
1427 std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1428 
1429 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1430   if (RegNo == 0)
1431     return 0;
1432   if (RegNo == 1)
1433     return 1;
1434   return -1;
1435 }
1436 
1437 bool AArch64TargetInfo::hasInt128Type() const { return true; }
1438 
// Little-endian AArch64: all configuration comes from the common
// AArch64TargetInfo base.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1442 
1443 void AArch64leTargetInfo::setDataLayout() {
1444   if (getTriple().isOSBinFormatMachO()) {
1445     if(getTriple().isArch32Bit())
1446       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
1447     else
1448       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
1449   } else
1450     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1451 }
1452 
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Mark little-endian, then add the common AArch64 macros.
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1458 
// Big-endian AArch64: all configuration comes from the common
// AArch64TargetInfo base.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1462 
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Mark big-endian (ARM-family and AArch64 spellings), then add the common
  // AArch64 macros.
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1470 
void AArch64beTargetInfo::setDataLayout() {
  // Big-endian AArch64 is not supported for Mach-O object files.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
1475 
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  // long double is plain IEEE double on Windows.
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // The 64-bit-wide standard typedefs all map to (unsigned) long long.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1493 
1494 void WindowsARM64TargetInfo::setDataLayout() {
1495   resetDataLayout(Triple.isOSBinFormatMachO()
1496                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
1497                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1498                   Triple.isOSBinFormatMachO() ? "_" : "");
1499 }
1500 
1501 TargetInfo::BuiltinVaListKind
1502 WindowsARM64TargetInfo::getBuiltinVaListKind() const {
1503   return TargetInfo::CharPtrBuiltinVaList;
1504 }
1505 
1506 TargetInfo::CallingConvCheckResult
1507 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
1508   switch (CC) {
1509   case CC_X86StdCall:
1510   case CC_X86ThisCall:
1511   case CC_X86FastCall:
1512   case CC_X86VectorCall:
1513     return CCCR_Ignore;
1514   case CC_C:
1515   case CC_OpenCLKernel:
1516   case CC_PreserveMost:
1517   case CC_PreserveAll:
1518   case CC_Swift:
1519   case CC_SwiftAsync:
1520   case CC_Win64:
1521     return CCCR_OK;
1522   default:
1523     return CCCR_Warning;
1524   }
1525 }
1526 
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  // MSVC environment: use the Microsoft C++ ABI.
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1532 
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    // Arm64EC also exposes the x64 macro family in addition to its own
    // _M_ARM64EC macro.
    Builder.defineMacro("_M_X64", "100");
    Builder.defineMacro("_M_AMD64", "100");
    Builder.defineMacro("_M_ARM64EC", "1");
  } else {
    Builder.defineMacro("_M_ARM64", "1");
  }
}
1544 
// Note: ClangABICompat4 is intentionally ignored; the Microsoft ARM64 target
// always uses the Win64 calling-convention compatibility rules.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
1549 
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  // TypeSize and the Align values below are measured in bits.
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
1566 
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  // MinGW uses the generic AArch64 C++ ABI rather than Microsoft's.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1572 
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // The 32-bit variant (arm64_32) still keeps a 64-bit intmax_t.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is IEEE double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 bit-field layout rules and the WatchOS C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1596 
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  // Distinguish the ILP32 variant (arm64_32) from regular 64-bit arm64.
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  // arm64e variant gets its own macro.
  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  // Add the common Darwin platform/version macros.
  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1615 
1616 TargetInfo::BuiltinVaListKind
1617 DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
1618   return TargetInfo::CharPtrBuiltinVaList;
1619 }
1620 
// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  // The incoming triple's architecture is rewritten to "aarch64" above,
  // keeping the vendor/OS/environment; mark this as a RenderScript target.
  IsRenderScriptTarget = true;
}
1630 
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Advertise RenderScript, then add the standard little-endian defines.
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1636