xref: /freebsd/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision 1db9f3b21e39176dd5b67cf8ac378633b172463e)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/TargetParser/AArch64TargetParser.h"
21 #include "llvm/TargetParser/ARMTargetParserCommon.h"
22 #include <optional>
23 
24 using namespace clang;
25 using namespace clang::targets;
26 
// Table of every AArch64 target builtin, indexed by
// (BuiltinID - Builtin::FirstTSBuiltin).  The .def files are expanded in a
// fixed order (NEON, SVE, SME, then general AArch64) which must match the
// builtin ID enumeration order in clang::AArch64; getTargetBuiltins()
// exposes this array to the rest of clang.
static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSME.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
56 
57 void AArch64TargetInfo::setArchFeatures() {
58   if (*ArchInfo == llvm::AArch64::ARMV8R) {
59     HasDotProd = true;
60     HasDIT = true;
61     HasFlagM = true;
62     HasRCPC = true;
63     FPU |= NeonMode;
64     HasCCPP = true;
65     HasCRC = true;
66     HasLSE = true;
67     HasRDM = true;
68   } else if (ArchInfo->Version.getMajor() == 8) {
69     if (ArchInfo->Version.getMinor() >= 7u) {
70       HasWFxT = true;
71     }
72     if (ArchInfo->Version.getMinor() >= 6u) {
73       HasBFloat16 = true;
74       HasMatMul = true;
75     }
76     if (ArchInfo->Version.getMinor() >= 5u) {
77       HasAlternativeNZCV = true;
78       HasFRInt3264 = true;
79       HasSSBS = true;
80       HasSB = true;
81       HasPredRes = true;
82       HasBTI = true;
83     }
84     if (ArchInfo->Version.getMinor() >= 4u) {
85       HasDotProd = true;
86       HasDIT = true;
87       HasFlagM = true;
88     }
89     if (ArchInfo->Version.getMinor() >= 3u) {
90       HasRCPC = true;
91       FPU |= NeonMode;
92     }
93     if (ArchInfo->Version.getMinor() >= 2u) {
94       HasCCPP = true;
95     }
96     if (ArchInfo->Version.getMinor() >= 1u) {
97       HasCRC = true;
98       HasLSE = true;
99       HasRDM = true;
100     }
101   } else if (ArchInfo->Version.getMajor() == 9) {
102     if (ArchInfo->Version.getMinor() >= 2u) {
103       HasWFxT = true;
104     }
105     if (ArchInfo->Version.getMinor() >= 1u) {
106       HasBFloat16 = true;
107       HasMatMul = true;
108     }
109     FPU |= SveMode;
110     HasSVE2 = true;
111     HasFullFP16 = true;
112     HasAlternativeNZCV = true;
113     HasFRInt3264 = true;
114     HasSSBS = true;
115     HasSB = true;
116     HasPredRes = true;
117     HasBTI = true;
118     HasDotProd = true;
119     HasDIT = true;
120     HasFlagM = true;
121     HasRCPC = true;
122     FPU |= NeonMode;
123     HasCCPP = true;
124     HasCRC = true;
125     HasLSE = true;
126     HasRDM = true;
127   }
128 }
129 
130 AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
131                                      const TargetOptions &Opts)
132     : TargetInfo(Triple), ABI("aapcs") {
133   if (getTriple().isOSOpenBSD()) {
134     Int64Type = SignedLongLong;
135     IntMaxType = SignedLongLong;
136   } else {
137     if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
138       WCharType = UnsignedInt;
139 
140     Int64Type = SignedLong;
141     IntMaxType = SignedLong;
142   }
143 
144   // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
145   HasLegalHalfType = true;
146   HalfArgsAndReturns = true;
147   HasFloat16 = true;
148   HasStrictFP = true;
149 
150   if (Triple.isArch64Bit())
151     LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
152   else
153     LongWidth = LongAlign = PointerWidth = PointerAlign = 32;
154 
155   MaxVectorAlign = 128;
156   MaxAtomicInlineWidth = 128;
157   MaxAtomicPromoteWidth = 128;
158 
159   LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
160   LongDoubleFormat = &llvm::APFloat::IEEEquad();
161 
162   BFloat16Width = BFloat16Align = 16;
163   BFloat16Format = &llvm::APFloat::BFloat();
164 
165   // Make __builtin_ms_va_list available.
166   HasBuiltinMSVaList = true;
167 
168   // Make the SVE types available.  Note that this deliberately doesn't
169   // depend on SveMode, since in principle it should be possible to turn
170   // SVE on and off within a translation unit.  It should also be possible
171   // to compile the global declaration:
172   //
173   // __SVInt8_t *ptr;
174   //
175   // even without SVE.
176   HasAArch64SVETypes = true;
177 
178   // {} in inline assembly are neon specifiers, not assembly variant
179   // specifiers.
180   NoAsmVariants = true;
181 
182   // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
183   // contributes to the alignment of the containing aggregate in the same way
184   // a plain (non bit-field) member of that type would, without exception for
185   // zero-sized or anonymous bit-fields."
186   assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
187   UseZeroLengthBitfieldAlignment = true;
188 
189   // AArch64 targets default to using the ARM C++ ABI.
190   TheCXXABI.set(TargetCXXABI::GenericAArch64);
191 
192   if (Triple.getOS() == llvm::Triple::Linux)
193     this->MCountName = "\01_mcount";
194   else if (Triple.getOS() == llvm::Triple::UnknownOS)
195     this->MCountName =
196         Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
197 }
198 
199 StringRef AArch64TargetInfo::getABI() const { return ABI; }
200 
201 bool AArch64TargetInfo::setABI(const std::string &Name) {
202   if (Name != "aapcs" && Name != "darwinpcs")
203     return false;
204 
205   ABI = Name;
206   return true;
207 }
208 
209 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
210                                                  BranchProtectionInfo &BPI,
211                                                  StringRef &Err) const {
212   llvm::ARM::ParsedBranchProtection PBP;
213   if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
214     return false;
215 
216   BPI.SignReturnAddr =
217       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
218           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
219           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
220           .Default(LangOptions::SignReturnAddressScopeKind::None);
221 
222   if (PBP.Key == "a_key")
223     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
224   else
225     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
226 
227   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
228   BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
229   return true;
230 }
231 
232 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
233   return Name == "generic" || llvm::AArch64::parseCpu(Name);
234 }
235 
236 bool AArch64TargetInfo::setCPU(const std::string &Name) {
237   return isValidCPUName(Name);
238 }
239 
// Populate Values with every CPU/architecture name the target parser
// accepts (the same set isValidCPUName() validates against, minus the
// "generic" pseudo-CPU which is handled separately).
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
244 
// Each getTargetDefinesARMV8<N>A hook defines the ACLE macros that first
// become available at that architecture level and then chains to the hook
// for the previous level, so a given -march receives the union of all older
// levels' macros.

void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // v8.1-A mandates the rounding-doubling multiply-accumulate instructions.
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}

void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  Builder.defineMacro("__ARM_FEATURE_BTI", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}
306 
// The v9.x hooks chain into the v8.x hierarchy at the v8 level each v9
// release is based on (v9.0-A = v8.5-A, v9.1-A = v8.6-A, and so on), so a
// v9 -march inherits all the corresponding v8 macros.

void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
  getTargetDefinesARMV94A(Opts, Builder);
}
342 
// Emit every predefined macro for this target: target identification, the
// (mostly fixed-value) ACLE predefines, feature macros gated on the flags
// computed by setArchFeatures()/handleTargetFeatures(), and finally the
// per-architecture-level macro chain.  NOTE: the order of defineMacro calls
// is observable (e.g. via -dM) and should be preserved.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  if (getTriple().isWindowsArm64EC()) {
    // Define the same set of macros as would be defined on x86_64 to ensure that
    // ARM64EC datatype layouts match those of x86_64 compiled code
    Builder.defineMacro("__amd64__");
    Builder.defineMacro("__amd64");
    Builder.defineMacro("__x86_64");
    Builder.defineMacro("__x86_64__");
    Builder.defineMacro("__arm64ec__");
  } else {
    Builder.defineMacro("__aarch64__");
  }

  // Inline assembly supports AArch64 flag outputs.
  Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");

  // Normalise the code-model name and expose it as __AARCH64_CMODEL_<X>__.
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH",
                      std::to_string(ArchInfo->Version.getMajor()));
  Builder.defineMacro("__ARM_ARCH_PROFILE",
                      std::string("'") + (char)ArchInfo->Profile + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  if (FPU & FPUMode)
    Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  // The SVE2 crypto sub-extensions additionally require SVE2 itself.
  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // RCPC3 subsumes RCPC; report the highest supported version.
  if (HasRCPC3)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
  else if (HasRCPC)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "1");

  if (HasFMV)
    Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasPAuth)
    Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  if (HasD128)
    Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");

  // Dispatch to the per-architecture-level hook, which chains down through
  // all older levels' macro definitions.
  if (*ArchInfo == llvm::AArch64::ARMV8_1A)
    getTargetDefinesARMV81A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
    getTargetDefinesARMV82A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
    getTargetDefinesARMV83A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
    getTargetDefinesARMV84A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
    getTargetDefinesARMV85A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
    getTargetDefinesARMV86A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
    getTargetDefinesARMV87A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
    getTargetDefinesARMV88A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
    getTargetDefinesARMV89A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9A)
    getTargetDefinesARMV9A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
    getTargetDefinesARMV91A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
    getTargetDefinesARMV92A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
    getTargetDefinesARMV93A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
    getTargetDefinesARMV94A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
    getTargetDefinesARMV95A(Opts, Builder);

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");

  // Allow detection of fast FMA support.
  Builder.defineMacro("__FP_FAST_FMA", "1");
  Builder.defineMacro("__FP_FAST_FMAF", "1");

  // C/C++ operators work on both VLS and VLA SVE types
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");

  // A fixed vscale (-mvscale-min == -mvscale-max) pins the SVE vector length,
  // which the ACLE exposes in bits.
  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
  }
}
597 
598 ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
599   return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
600                                          Builtin::FirstTSBuiltin);
601 }
602 
603 std::optional<std::pair<unsigned, unsigned>>
604 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
605   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
606     return std::pair<unsigned, unsigned>(
607         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
608 
609   if (hasFeature("sve"))
610     return std::pair<unsigned, unsigned>(1, 16);
611 
612   return std::nullopt;
613 }
614 
615 unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
616   if (Name == "default")
617     return 0;
618   for (const auto &E : llvm::AArch64::Extensions)
619     if (Name == E.Name)
620       return E.FmvPriority;
621   return 0;
622 }
623 
// Per-feature cost used when comparing FMV candidates.
unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
  // Take the maximum priority as per feature cost, so more features win.
  return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
}
628 
629 bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
630   auto F = llvm::find_if(llvm::AArch64::Extensions, [&](const auto &E) {
631     return Name == E.Name && !E.DependentFeatures.empty();
632   });
633   return F != std::end(llvm::AArch64::Extensions);
634 }
635 
636 StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
637   auto F = llvm::find_if(llvm::AArch64::Extensions,
638                          [&](const auto &E) { return Name == E.Name; });
639   return F != std::end(llvm::AArch64::Extensions) ? F->DependentFeatures
640                                                   : StringRef();
641 }
642 
643 bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
644   for (const auto &E : llvm::AArch64::Extensions)
645     if (FeatureStr == E.Name)
646       return true;
647   return false;
648 }
649 
// Query whether a named target feature is enabled for this configuration.
// Names map onto the flags computed by setArchFeatures()/
// handleTargetFeatures(); the SVE2 sub-extensions additionally require
// SveMode.  First matching case wins; unknown names report false.
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("fmv", HasFMV)
      .Cases("neon", "fp", "simd", FPU & NeonMode)
      .Case("jscvt", HasJSCVT)
      .Case("fcma", HasFCMA)
      .Case("rng", HasRandGen)
      .Case("flagm", HasFlagM)
      .Case("flagm2", HasAlternativeNZCV)
      .Case("fp16fml", HasFP16FML)
      .Case("dotprod", HasDotProd)
      .Case("sm4", HasSM4)
      .Case("rdm", HasRDM)
      .Case("lse", HasLSE)
      .Case("crc", HasCRC)
      .Case("sha2", HasSHA2)
      .Case("sha3", HasSHA3)
      .Cases("aes", "pmull", HasAES)
      .Cases("fp16", "fullfp16", HasFullFP16)
      .Case("dit", HasDIT)
      .Case("dpb", HasCCPP)
      .Case("dpb2", HasCCDP)
      .Case("rcpc", HasRCPC)
      .Case("frintts", HasFRInt3264)
      .Case("i8mm", HasMatMul)
      .Case("bf16", HasBFloat16)
      .Case("sve", FPU & SveMode)
      .Case("sve-bf16", FPU & SveMode && HasBFloat16)
      .Case("sve-i8mm", FPU & SveMode && HasMatMul)
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
      .Case("sve2", FPU & SveMode && HasSVE2)
      .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
      .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
      .Case("sme", HasSME)
      .Case("sme-f64f64", HasSMEF64F64)
      .Case("sme-i16i64", HasSMEI16I64)
      .Case("sme-fa64", HasSMEFA64)
      .Cases("memtag", "memtag2", HasMTE)
      .Case("sb", HasSB)
      .Case("predres", HasPredRes)
      .Cases("ssbs", "ssbs2", HasSSBS)
      .Case("bti", HasBTI)
      .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
      .Case("wfxt", HasWFxT)
      .Case("rcpc3", HasRCPC3)
      .Default(false);
}
701 
702 void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
703                                           StringRef Name, bool Enabled) const {
704   Features[Name] = Enabled;
705   // If the feature is an architecture feature (like v8.2a), add all previous
706   // architecture versions and any dependant target features.
707   const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
708       llvm::AArch64::ArchInfo::findBySubArch(Name);
709 
710   if (!ArchInfo)
711     return; // Not an architecture, nothing more to do.
712 
713   // Disabling an architecture feature does not affect dependent features
714   if (!Enabled)
715     return;
716 
717   for (const auto *OtherArch : llvm::AArch64::ArchInfos)
718     if (ArchInfo->implies(*OtherArch))
719       Features[OtherArch->getSubArch()] = true;
720 
721   // Set any features implied by the architecture
722   std::vector<StringRef> CPUFeats;
723   if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
724     for (auto F : CPUFeats) {
725       assert(F[0] == '+' && "Expected + in target feature!");
726       Features[F.drop_front(1)] = true;
727     }
728   }
729 }
730 
731 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
732                                              DiagnosticsEngine &Diags) {
733   for (const auto &Feature : Features) {
734     if (Feature == "-fp-armv8")
735       HasNoFP = true;
736     if (Feature == "-neon")
737       HasNoNeon = true;
738     if (Feature == "-sve")
739       HasNoSVE = true;
740 
741     if (Feature == "+neon" || Feature == "+fp-armv8")
742       FPU |= NeonMode;
743     if (Feature == "+jscvt") {
744       HasJSCVT = true;
745       FPU |= NeonMode;
746     }
747     if (Feature == "+fcma") {
748       HasFCMA = true;
749       FPU |= NeonMode;
750     }
751 
752     if (Feature == "+sve") {
753       FPU |= NeonMode;
754       FPU |= SveMode;
755       HasFullFP16 = true;
756     }
757     if (Feature == "+sve2") {
758       FPU |= NeonMode;
759       FPU |= SveMode;
760       HasFullFP16 = true;
761       HasSVE2 = true;
762     }
763     if (Feature == "+sve2-aes") {
764       FPU |= NeonMode;
765       FPU |= SveMode;
766       HasFullFP16 = true;
767       HasSVE2 = true;
768       HasSVE2AES = true;
769     }
770     if (Feature == "+sve2-sha3") {
771       FPU |= NeonMode;
772       FPU |= SveMode;
773       HasFullFP16 = true;
774       HasSVE2 = true;
775       HasSVE2SHA3 = true;
776     }
777     if (Feature == "+sve2-sm4") {
778       FPU |= NeonMode;
779       FPU |= SveMode;
780       HasFullFP16 = true;
781       HasSVE2 = true;
782       HasSVE2SM4 = true;
783     }
784     if (Feature == "+sve2-bitperm") {
785       FPU |= NeonMode;
786       FPU |= SveMode;
787       HasFullFP16 = true;
788       HasSVE2 = true;
789       HasSVE2BitPerm = true;
790     }
791     if (Feature == "+f32mm") {
792       FPU |= NeonMode;
793       FPU |= SveMode;
794       HasFullFP16 = true;
795       HasMatmulFP32 = true;
796     }
797     if (Feature == "+f64mm") {
798       FPU |= NeonMode;
799       FPU |= SveMode;
800       HasFullFP16 = true;
801       HasMatmulFP64 = true;
802     }
803     if (Feature == "+sme") {
804       HasSME = true;
805       HasBFloat16 = true;
806       HasFullFP16 = true;
807     }
808     if (Feature == "+sme-f64f64") {
809       HasSME = true;
810       HasSMEF64F64 = true;
811       HasBFloat16 = true;
812       HasFullFP16 = true;
813     }
814     if (Feature == "+sme-i16i64") {
815       HasSME = true;
816       HasSMEI16I64 = true;
817       HasBFloat16 = true;
818       HasFullFP16 = true;
819     }
820     if (Feature == "+sme-fa64") {
821       FPU |= NeonMode;
822       FPU |= SveMode;
823       HasSME = true;
824       HasSVE2 = true;
825       HasSMEFA64 = true;
826     }
827     if (Feature == "+sb")
828       HasSB = true;
829     if (Feature == "+predres")
830       HasPredRes = true;
831     if (Feature == "+ssbs")
832       HasSSBS = true;
833     if (Feature == "+bti")
834       HasBTI = true;
835     if (Feature == "+wfxt")
836       HasWFxT = true;
837     if (Feature == "-fmv")
838       HasFMV = false;
839     if (Feature == "+crc")
840       HasCRC = true;
841     if (Feature == "+rcpc")
842       HasRCPC = true;
843     if (Feature == "+aes") {
844       FPU |= NeonMode;
845       HasAES = true;
846     }
847     if (Feature == "+sha2") {
848       FPU |= NeonMode;
849       HasSHA2 = true;
850     }
851     if (Feature == "+sha3") {
852       FPU |= NeonMode;
853       HasSHA2 = true;
854       HasSHA3 = true;
855     }
856     if (Feature == "+rdm") {
857       FPU |= NeonMode;
858       HasRDM = true;
859     }
860     if (Feature == "+dit")
861       HasDIT = true;
862     if (Feature == "+cccp")
863       HasCCPP = true;
864     if (Feature == "+ccdp") {
865       HasCCPP = true;
866       HasCCDP = true;
867     }
868     if (Feature == "+fptoint")
869       HasFRInt3264 = true;
870     if (Feature == "+sm4") {
871       FPU |= NeonMode;
872       HasSM4 = true;
873     }
874     if (Feature == "+strict-align")
875       HasUnaligned = false;
876     // All predecessor archs are added but select the latest one for ArchKind.
877     if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
878       ArchInfo = &llvm::AArch64::ARMV8A;
879     if (Feature == "+v8.1a" &&
880         ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
881       ArchInfo = &llvm::AArch64::ARMV8_1A;
882     if (Feature == "+v8.2a" &&
883         ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
884       ArchInfo = &llvm::AArch64::ARMV8_2A;
885     if (Feature == "+v8.3a" &&
886         ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
887       ArchInfo = &llvm::AArch64::ARMV8_3A;
888     if (Feature == "+v8.4a" &&
889         ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
890       ArchInfo = &llvm::AArch64::ARMV8_4A;
891     if (Feature == "+v8.5a" &&
892         ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
893       ArchInfo = &llvm::AArch64::ARMV8_5A;
894     if (Feature == "+v8.6a" &&
895         ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
896       ArchInfo = &llvm::AArch64::ARMV8_6A;
897     if (Feature == "+v8.7a" &&
898         ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
899       ArchInfo = &llvm::AArch64::ARMV8_7A;
900     if (Feature == "+v8.8a" &&
901         ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
902       ArchInfo = &llvm::AArch64::ARMV8_8A;
903     if (Feature == "+v8.9a" &&
904         ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
905       ArchInfo = &llvm::AArch64::ARMV8_9A;
906     if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
907       ArchInfo = &llvm::AArch64::ARMV9A;
908     if (Feature == "+v9.1a" &&
909         ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
910       ArchInfo = &llvm::AArch64::ARMV9_1A;
911     if (Feature == "+v9.2a" &&
912         ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
913       ArchInfo = &llvm::AArch64::ARMV9_2A;
914     if (Feature == "+v9.3a" &&
915         ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
916       ArchInfo = &llvm::AArch64::ARMV9_3A;
917     if (Feature == "+v9.4a" &&
918         ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
919       ArchInfo = &llvm::AArch64::ARMV9_4A;
920     if (Feature == "+v9.5a" &&
921         ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
922       ArchInfo = &llvm::AArch64::ARMV9_5A;
923     if (Feature == "+v8r")
924       ArchInfo = &llvm::AArch64::ARMV8R;
925     if (Feature == "+fullfp16") {
926       FPU |= NeonMode;
927       HasFullFP16 = true;
928     }
929     if (Feature == "+dotprod") {
930       FPU |= NeonMode;
931       HasDotProd = true;
932     }
933     if (Feature == "+fp16fml") {
934       FPU |= NeonMode;
935       HasFullFP16 = true;
936       HasFP16FML = true;
937     }
938     if (Feature == "+mte")
939       HasMTE = true;
940     if (Feature == "+tme")
941       HasTME = true;
942     if (Feature == "+pauth")
943       HasPAuth = true;
944     if (Feature == "+i8mm")
945       HasMatMul = true;
946     if (Feature == "+bf16")
947       HasBFloat16 = true;
948     if (Feature == "+lse")
949       HasLSE = true;
950     if (Feature == "+ls64")
951       HasLS64 = true;
952     if (Feature == "+rand")
953       HasRandGen = true;
954     if (Feature == "+flagm")
955       HasFlagM = true;
956     if (Feature == "+altnzcv") {
957       HasFlagM = true;
958       HasAlternativeNZCV = true;
959     }
960     if (Feature == "+mops")
961       HasMOPS = true;
962     if (Feature == "+d128")
963       HasD128 = true;
964     if (Feature == "+gcs")
965       HasGCS = true;
966     if (Feature == "+rcpc3")
967       HasRCPC3 = true;
968   }
969 
970   // Check features that are manually disabled by command line options.
971   // This needs to be checked after architecture-related features are handled,
972   // making sure they are properly disabled when required.
973   for (const auto &Feature : Features) {
974     if (Feature == "-d128")
975       HasD128 = false;
976   }
977 
978   setDataLayout();
979   setArchFeatures();
980 
981   if (HasNoFP) {
982     FPU &= ~FPUMode;
983     FPU &= ~NeonMode;
984     FPU &= ~SveMode;
985   }
986   if (HasNoNeon) {
987     FPU &= ~NeonMode;
988     FPU &= ~SveMode;
989   }
990   if (HasNoSVE)
991     FPU &= ~SveMode;
992 
993   return true;
994 }
995 
// Build the target feature map for this CPU/feature combination. The order of
// UpdatedFeaturesVec matters: later entries override earlier ones when
// TargetInfo::initFeatureMap folds them into the map.
bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
  if (CpuInfo) {
    auto Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
  // add target '+/-'features that can later disable some of features added on
  // the first loop. Function Multi Versioning features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if (((Feature[0] == '?' || Feature[0] == '+')) &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
      // Dependencies come as a comma-separated list of '+'features.
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(AttrFeatures, ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        // Canonicalize user-facing extension names (e.g. "+memtag") to the
        // backend feature spelling when one exists.
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
          llvm::AArch64::parseArchExtension(Feature.substr(1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
1040 
// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  // Translate a "+feat1+feat2..." suffix into backend feature names. Unknown
  // entries are kept as "+name"/"-name" so Sema can diagnose them later.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Features.push_back(FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.starts_with("no"))
          Features.push_back("-" + Feature.drop_front(2).str());
        else
          Features.push_back("+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted for compatibility but has no effect here.
    if (Feature.starts_with("fpmath="))
      continue;

    if (Feature.starts_with("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.starts_with("arch=")) {
      // Only one "arch=" is allowed; record duplicates for diagnostics.
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const std::optional<llvm::AArch64::ArchInfo> AI =
          llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.starts_with("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.starts_with("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.starts_with("+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.starts_with("no-")) {
      // "no-<feat>": translate to the backend name, then negate it.
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(Feature.split("-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
      else
        Ret.Features.push_back("-" + Feature.split("-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(FeatureName.str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  return Ret;
}
1137 
1138 bool AArch64TargetInfo::hasBFloat16Type() const {
1139   return true;
1140 }
1141 
1142 TargetInfo::CallingConvCheckResult
1143 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1144   switch (CC) {
1145   case CC_C:
1146   case CC_Swift:
1147   case CC_SwiftAsync:
1148   case CC_PreserveMost:
1149   case CC_PreserveAll:
1150   case CC_OpenCLKernel:
1151   case CC_AArch64VectorCall:
1152   case CC_AArch64SVEPCS:
1153   case CC_Win64:
1154     return CCCR_OK;
1155   default:
1156     return CCCR_Warning;
1157   }
1158 }
1159 
// AArch64 CLZ is well-defined for a zero input, so __builtin_clz(0) is too.
bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1161 
// Generic AArch64 uses the AAPCS64 struct-based va_list ABI.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
1165 
// Register names accepted in GCC-style inline asm clobbers/constraints.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0",  "pn1",  "pn2",  "pn3",  "pn4",  "pn5",  "pn6",  "pn7",  "pn8",
    "pn9",  "pn10", "pn11", "pn12", "pn13", "pn14", "pn15"
};
1205 
// Expose the static register-name table above to the generic TargetInfo code.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}
1209 
// Alternate spellings accepted for register names in inline asm.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1248 
// Expose the static alias table above to the generic TargetInfo code.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}
1252 
// Returns the length of cc constraint.
// A condition-code constraint is exactly "@cc" followed by one of the 16
// AArch64 condition mnemonics; everything else yields 0 (no match).
static unsigned matchAsmCCConstraint(const char *Name) {
  constexpr unsigned len = 5;
  const std::string_view Str(Name);
  if (Str.size() != len || Str.substr(0, 3) != "@cc")
    return 0;
  static constexpr std::string_view Conds[] = {
      "eq", "ne", "hs", "cs", "cc", "lo", "mi", "pl",
      "vs", "vc", "hi", "ls", "ge", "lt", "gt", "le"};
  const std::string_view Cond = Str.substr(3);
  for (std::string_view C : Conds)
    if (Cond == C)
      return len;
  return 0;
}
1276 
// Convert an AArch64 asm constraint into the canonical form LLVM expects.
// On return, Constraint points at the LAST character consumed (the generic
// driver advances it by one afterwards).
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    // Condition-code output constraint, e.g. "@cceq" -> "{@cceq}".
    if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    // Not a recognised "@cc..." constraint; pass the '@' through unchanged.
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1298 
// Validate a single GCC-style asm constraint letter (or multi-letter
// sequence) for AArch64, recording in Info whether it allows registers
// and/or memory. Returns false for unrecognised constraints.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    // Two-letter suffixes; on success Name is advanced past them so the
    // caller's increment lands after the full constraint.
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
      // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
    // No "@cc..." match: fall out of the switch to the final return false.
  }
  return false;
}
1363 
1364 bool AArch64TargetInfo::validateConstraintModifier(
1365     StringRef Constraint, char Modifier, unsigned Size,
1366     std::string &SuggestedModifier) const {
1367   // Strip off constraint modifiers.
1368   Constraint = Constraint.ltrim("=+&");
1369 
1370   switch (Constraint[0]) {
1371   default:
1372     return true;
1373   case 'z':
1374   case 'r': {
1375     switch (Modifier) {
1376     case 'x':
1377     case 'w':
1378       // For now assume that the person knows what they're
1379       // doing with the modifier.
1380       return true;
1381     default:
1382       // By default an 'r' constraint will be in the 'x'
1383       // registers.
1384       if (Size == 64)
1385         return true;
1386 
1387       if (Size == 512)
1388         return HasLS64;
1389 
1390       SuggestedModifier = "w";
1391       return false;
1392     }
1393   }
1394   }
1395 }
1396 
// No implicit clobbers are added to AArch64 inline asm.
std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1398 
1399 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1400   if (RegNo == 0)
1401     return 0;
1402   if (RegNo == 1)
1403     return 1;
1404   return -1;
1405 }
1406 
// __int128 is always available on AArch64.
bool AArch64TargetInfo::hasInt128Type() const { return true; }
1408 
// Little-endian AArch64: all configuration is inherited from the base class.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1412 
1413 void AArch64leTargetInfo::setDataLayout() {
1414   if (getTriple().isOSBinFormatMachO()) {
1415     if(getTriple().isArch32Bit())
1416       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
1417     else
1418       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
1419   } else
1420     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1421 }
1422 
// Define the little-endian macro, then the common AArch64 macros.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1428 
// Big-endian AArch64: all configuration is inherited from the base class.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1432 
// Define the big-endian macros, then the common AArch64 macros.
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1440 
// Big-endian layout string; Mach-O has no big-endian AArch64 variant.
void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
1445 
// Windows on ARM64: override the type model to match the Windows LLP64 ABI.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  // long double is plain IEEE double on Windows.
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // 64-bit integer and pointer-sized types are based on long long.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1463 
1464 void WindowsARM64TargetInfo::setDataLayout() {
1465   resetDataLayout(Triple.isOSBinFormatMachO()
1466                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
1467                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1468                   Triple.isOSBinFormatMachO() ? "_" : "");
1469 }
1470 
// Windows ARM64 uses a simple char* va_list, not the AAPCS64 struct form.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1475 
1476 TargetInfo::CallingConvCheckResult
1477 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
1478   switch (CC) {
1479   case CC_X86StdCall:
1480   case CC_X86ThisCall:
1481   case CC_X86FastCall:
1482   case CC_X86VectorCall:
1483     return CCCR_Ignore;
1484   case CC_C:
1485   case CC_OpenCLKernel:
1486   case CC_PreserveMost:
1487   case CC_PreserveAll:
1488   case CC_Swift:
1489   case CC_SwiftAsync:
1490   case CC_Win64:
1491     return CCCR_OK;
1492   default:
1493     return CCCR_Warning;
1494   }
1495 }
1496 
// MSVC-environment ARM64: same as Windows ARM64 but with the Microsoft C++
// ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1502 
// Define the MSVC machine macros. Arm64EC emulates x64, so it defines the
// x64 macros alongside _M_ARM64EC; native ARM64 defines _M_ARM64 only.
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    Builder.defineMacro("_M_X64", "100");
    Builder.defineMacro("_M_AMD64", "100");
    Builder.defineMacro("_M_ARM64EC", "1");
  } else {
    Builder.defineMacro("_M_ARM64", "1");
  }
}
1514 
// Always use the Microsoft Win64 convention kind, regardless of the
// clang-ABI-compat setting.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
1519 
1520 unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
1521   unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
1522 
1523   // MSVC does size based alignment for arm64 based on alignment section in
1524   // below document, replicate that to keep alignment consistent with object
1525   // files compiled by MSVC.
1526   // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
1527   if (TypeSize >= 512) {              // TypeSize >= 64 bytes
1528     Align = std::max(Align, 128u);    // align type at least 16 bytes
1529   } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
1530     Align = std::max(Align, 64u);     // align type at least 8 butes
1531   } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
1532     Align = std::max(Align, 32u);     // align type at least 4 bytes
1533   }
1534   return Align;
1535 }
1536 
// MinGW-environment ARM64: same as Windows ARM64 but with the Itanium-style
// generic AArch64 C++ ABI instead of the Microsoft one.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1542 
// Darwin (macOS/iOS/watchOS) AArch64: adjust the type model and C++ ABI for
// the Apple platforms, including the ILP32-style arm64_32 watchOS variant.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is IEEE double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 (watchOS) bitfield layout quirks and C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1566 
// Define the Darwin-specific arm64 macros, then the common Darwin macros.
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  // Architecture macro differs between arm64 and the 32-bit arm64_32 ABI.
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  // arm64e is Apple's pointer-authentication-enabled ABI variant.
  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1585 
// Darwin AArch64 uses a simple char* va_list, not the AAPCS64 struct form.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1590 
// 64-bit RenderScript is aarch64
// The incoming "renderscript64" triple is rewritten to a plain "aarch64"
// triple (preserving vendor/OS/environment) before delegating to the
// little-endian AArch64 target.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
1600 
// Define __RENDERSCRIPT__, then the regular little-endian AArch64 macros.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1606