xref: /freebsd/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/TargetParser/AArch64TargetParser.h"
21 #include "llvm/TargetParser/ARMTargetParserCommon.h"
22 #include <optional>
23 
24 using namespace clang;
25 using namespace clang::targets;
26 
// Table describing every AArch64 target builtin.  The entries must appear in
// exactly the same order as the builtin IDs generated from the .def files
// (NEON, then SVE, then SME, then the general AArch64 builtins), because
// getTargetBuiltins() indexes this array relative to Builtin::FirstTSBuiltin.
static constexpr Builtin::Info BuiltinInfo[] = {
// NEON builtins: no header, available in all language modes.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"

// SVE builtins.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"

// SME builtins.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSME.def"

// General AArch64 builtins; these may be language-restricted or require a
// specific header.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
56 
57 void AArch64TargetInfo::setArchFeatures() {
58   if (*ArchInfo == llvm::AArch64::ARMV8R) {
59     HasDotProd = true;
60     HasDIT = true;
61     HasFlagM = true;
62     HasRCPC = true;
63     FPU |= NeonMode;
64     HasCCPP = true;
65     HasCRC = true;
66     HasLSE = true;
67     HasRDM = true;
68   } else if (ArchInfo->Version.getMajor() == 8) {
69     if (ArchInfo->Version.getMinor() >= 7u) {
70       HasWFxT = true;
71     }
72     if (ArchInfo->Version.getMinor() >= 6u) {
73       HasBFloat16 = true;
74       HasMatMul = true;
75     }
76     if (ArchInfo->Version.getMinor() >= 5u) {
77       HasAlternativeNZCV = true;
78       HasFRInt3264 = true;
79       HasSSBS = true;
80       HasSB = true;
81       HasPredRes = true;
82       HasBTI = true;
83     }
84     if (ArchInfo->Version.getMinor() >= 4u) {
85       HasDotProd = true;
86       HasDIT = true;
87       HasFlagM = true;
88     }
89     if (ArchInfo->Version.getMinor() >= 3u) {
90       HasRCPC = true;
91       FPU |= NeonMode;
92     }
93     if (ArchInfo->Version.getMinor() >= 2u) {
94       HasCCPP = true;
95     }
96     if (ArchInfo->Version.getMinor() >= 1u) {
97       HasCRC = true;
98       HasLSE = true;
99       HasRDM = true;
100     }
101   } else if (ArchInfo->Version.getMajor() == 9) {
102     if (ArchInfo->Version.getMinor() >= 2u) {
103       HasWFxT = true;
104     }
105     if (ArchInfo->Version.getMinor() >= 1u) {
106       HasBFloat16 = true;
107       HasMatMul = true;
108     }
109     FPU |= SveMode;
110     HasSVE2 = true;
111     HasFullFP16 = true;
112     HasAlternativeNZCV = true;
113     HasFRInt3264 = true;
114     HasSSBS = true;
115     HasSB = true;
116     HasPredRes = true;
117     HasBTI = true;
118     HasDotProd = true;
119     HasDIT = true;
120     HasFlagM = true;
121     HasRCPC = true;
122     FPU |= NeonMode;
123     HasCCPP = true;
124     HasCRC = true;
125     HasLSE = true;
126     HasRDM = true;
127   }
128 }
129 
// Construct the base AArch64 target: type widths/formats, ABI defaults and
// OS-specific tweaks that do not depend on the feature set.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD uses "long long" for int64_t/intmax_t; everyone else uses
  // "long".  Outside Darwin and NetBSD, wchar_t is unsigned per AAPCS.
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HalfArgsAndReturns = true;
  HasFloat16 = true;
  HasStrictFP = true;

  // Pointer and long width follow the triple's register width (32-bit for
  // ILP32-style triples).
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  // 128-bit vectors and atomics are supported natively.
  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE quad precision (128-bit).
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling entry-point name differs by OS convention; the \01 prefix
  // suppresses the usual symbol mangling.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
198 
199 StringRef AArch64TargetInfo::getABI() const { return ABI; }
200 
201 bool AArch64TargetInfo::setABI(const std::string &Name) {
202   if (Name != "aapcs" && Name != "darwinpcs")
203     return false;
204 
205   ABI = Name;
206   return true;
207 }
208 
209 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
210                                                  BranchProtectionInfo &BPI,
211                                                  StringRef &Err) const {
212   llvm::ARM::ParsedBranchProtection PBP;
213   if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
214     return false;
215 
216   BPI.SignReturnAddr =
217       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
218           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
219           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
220           .Default(LangOptions::SignReturnAddressScopeKind::None);
221 
222   if (PBP.Key == "a_key")
223     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
224   else
225     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
226 
227   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
228   return true;
229 }
230 
231 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
232   return Name == "generic" || llvm::AArch64::parseCpu(Name);
233 }
234 
// Accept a -mcpu value; validity is the only requirement here, the actual
// feature selection happens elsewhere.
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
238 
// Populate Values with the CPU names the target parser accepts (used for
// diagnostics such as the -mcpu suggestion list).
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
243 
// Macros that become unconditional at Armv8.1-A (QRDMX: rounding doubling
// multiply-accumulate).
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}
248 
// Armv8.2-A adds no unconditional macros of its own beyond the v8.1 set.
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
254 
// Armv8.3-A: complex-number arithmetic, JavaScript float conversion and
// pointer authentication become mandatory.
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}
263 
// Armv8.4-A adds no unconditional macros of its own beyond the v8.3 set.
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
269 
// Armv8.5-A: FRINT (round to 32/64-bit int) and BTI become mandatory.
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  Builder.defineMacro("__ARM_FEATURE_BTI", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
277 
// Armv8.6-A defines, layered on the v8.5 set.
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
287 
// Armv8.7-A adds no unconditional macros of its own beyond the v8.6 set.
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
293 
// Armv8.8-A adds no unconditional macros of its own beyond the v8.7 set.
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}
299 
// Armv8.9-A adds no unconditional macros of its own beyond the v8.8 set.
void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}
305 
// Armv9.x-A is defined in terms of the corresponding Armv8.(x+5) feature
// level, so each v9 helper simply forwards to the matching v8 one.
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}
311 
// See getTargetDefinesARMV9A: v9.1 forwards to the v8.6 macro set.
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}
317 
// See getTargetDefinesARMV9A: v9.2 forwards to the v8.7 macro set.
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}
323 
// See getTargetDefinesARMV9A: v9.3 forwards to the v8.8 macro set.
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
329 
// See getTargetDefinesARMV9A: v9.4 forwards to the v8.9 macro set.
void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}
335 
336 void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
337                                          MacroBuilder &Builder) const {
338   // Target identification.
339   Builder.defineMacro("__aarch64__");
340   // Inline assembly supports AArch64 flag outputs.
341   Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
342 
343   std::string CodeModel = getTargetOpts().CodeModel;
344   if (CodeModel == "default")
345     CodeModel = "small";
346   for (char &c : CodeModel)
347     c = toupper(c);
348   Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
349 
350   // ACLE predefines. Many can only have one possible value on v8 AArch64.
351   Builder.defineMacro("__ARM_ACLE", "200");
352   Builder.defineMacro("__ARM_ARCH",
353                       std::to_string(ArchInfo->Version.getMajor()));
354   Builder.defineMacro("__ARM_ARCH_PROFILE",
355                       std::string("'") + (char)ArchInfo->Profile + "'");
356 
357   Builder.defineMacro("__ARM_64BIT_STATE", "1");
358   Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
359   Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
360 
361   Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
362   Builder.defineMacro("__ARM_FEATURE_FMA", "1");
363   Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
364   Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
365   Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
366   Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
367   Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
368 
369   Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
370 
371   // 0xe implies support for half, single and double precision operations.
372   if (FPU & FPUMode)
373     Builder.defineMacro("__ARM_FP", "0xE");
374 
375   // PCS specifies this for SysV variants, which is all we support. Other ABIs
376   // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
377   Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
378   Builder.defineMacro("__ARM_FP16_ARGS", "1");
379 
380   if (Opts.UnsafeFPMath)
381     Builder.defineMacro("__ARM_FP_FAST", "1");
382 
383   Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
384                       Twine(Opts.WCharSize ? Opts.WCharSize : 4));
385 
386   Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
387 
388   if (FPU & NeonMode) {
389     Builder.defineMacro("__ARM_NEON", "1");
390     // 64-bit NEON supports half, single and double precision operations.
391     Builder.defineMacro("__ARM_NEON_FP", "0xE");
392   }
393 
394   if (FPU & SveMode)
395     Builder.defineMacro("__ARM_FEATURE_SVE", "1");
396 
397   if ((FPU & NeonMode) && (FPU & SveMode))
398     Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
399 
400   if (HasSVE2)
401     Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
402 
403   if (HasSVE2 && HasSVE2AES)
404     Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
405 
406   if (HasSVE2 && HasSVE2BitPerm)
407     Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
408 
409   if (HasSVE2 && HasSVE2SHA3)
410     Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
411 
412   if (HasSVE2 && HasSVE2SM4)
413     Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
414 
415   if (HasCRC)
416     Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
417 
418   if (HasRCPC3)
419     Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
420   else if (HasRCPC)
421     Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
422 
423   if (HasFMV)
424     Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
425 
426   // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
427   // macros for AES, SHA2, SHA3 and SM4
428   if (HasAES && HasSHA2)
429     Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
430 
431   if (HasAES)
432     Builder.defineMacro("__ARM_FEATURE_AES", "1");
433 
434   if (HasSHA2)
435     Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
436 
437   if (HasSHA3) {
438     Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
439     Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
440   }
441 
442   if (HasSM4) {
443     Builder.defineMacro("__ARM_FEATURE_SM3", "1");
444     Builder.defineMacro("__ARM_FEATURE_SM4", "1");
445   }
446 
447   if (HasPAuth)
448     Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
449 
450   if (HasUnaligned)
451     Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
452 
453   if ((FPU & NeonMode) && HasFullFP16)
454     Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
455   if (HasFullFP16)
456    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
457 
458   if (HasDotProd)
459     Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
460 
461   if (HasMTE)
462     Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
463 
464   if (HasTME)
465     Builder.defineMacro("__ARM_FEATURE_TME", "1");
466 
467   if (HasMatMul)
468     Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
469 
470   if (HasLSE)
471     Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
472 
473   if (HasBFloat16) {
474     Builder.defineMacro("__ARM_FEATURE_BF16", "1");
475     Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
476     Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
477     Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
478   }
479 
480   if ((FPU & SveMode) && HasBFloat16) {
481     Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
482   }
483 
484   if ((FPU & SveMode) && HasMatmulFP64)
485     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
486 
487   if ((FPU & SveMode) && HasMatmulFP32)
488     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
489 
490   if ((FPU & SveMode) && HasMatMul)
491     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
492 
493   if ((FPU & NeonMode) && HasFP16FML)
494     Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
495 
496   if (Opts.hasSignReturnAddress()) {
497     // Bitmask:
498     // 0: Protection using the A key
499     // 1: Protection using the B key
500     // 2: Protection including leaf functions
501     unsigned Value = 0;
502 
503     if (Opts.isSignReturnAddressWithAKey())
504       Value |= (1 << 0);
505     else
506       Value |= (1 << 1);
507 
508     if (Opts.isSignReturnAddressScopeAll())
509       Value |= (1 << 2);
510 
511     Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
512   }
513 
514   if (Opts.BranchTargetEnforcement)
515     Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
516 
517   if (HasLS64)
518     Builder.defineMacro("__ARM_FEATURE_LS64", "1");
519 
520   if (HasRandGen)
521     Builder.defineMacro("__ARM_FEATURE_RNG", "1");
522 
523   if (HasMOPS)
524     Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
525 
526   if (HasD128)
527     Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
528 
529   if (*ArchInfo == llvm::AArch64::ARMV8_1A)
530     getTargetDefinesARMV81A(Opts, Builder);
531   else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
532     getTargetDefinesARMV82A(Opts, Builder);
533   else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
534     getTargetDefinesARMV83A(Opts, Builder);
535   else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
536     getTargetDefinesARMV84A(Opts, Builder);
537   else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
538     getTargetDefinesARMV85A(Opts, Builder);
539   else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
540     getTargetDefinesARMV86A(Opts, Builder);
541   else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
542     getTargetDefinesARMV87A(Opts, Builder);
543   else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
544     getTargetDefinesARMV88A(Opts, Builder);
545   else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
546     getTargetDefinesARMV89A(Opts, Builder);
547   else if (*ArchInfo == llvm::AArch64::ARMV9A)
548     getTargetDefinesARMV9A(Opts, Builder);
549   else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
550     getTargetDefinesARMV91A(Opts, Builder);
551   else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
552     getTargetDefinesARMV92A(Opts, Builder);
553   else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
554     getTargetDefinesARMV93A(Opts, Builder);
555   else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
556     getTargetDefinesARMV94A(Opts, Builder);
557 
558   // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
559   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
560   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
561   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
562   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
563 
564   // Allow detection of fast FMA support.
565   Builder.defineMacro("__FP_FAST_FMA", "1");
566   Builder.defineMacro("__FP_FAST_FMAF", "1");
567 
568   // C/C++ operators work on both VLS and VLA SVE types
569   if (FPU & SveMode)
570     Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
571 
572   if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
573     Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
574   }
575 }
576 
// Expose the builtin table; its length is the count of AArch64-specific
// builtin IDs, matching the concatenated .def contents in BuiltinInfo.
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                         Builtin::FirstTSBuiltin);
}
581 
582 std::optional<std::pair<unsigned, unsigned>>
583 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
584   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
585     return std::pair<unsigned, unsigned>(
586         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
587 
588   if (hasFeature("sve"))
589     return std::pair<unsigned, unsigned>(1, 16);
590 
591   return std::nullopt;
592 }
593 
594 unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
595   if (Name == "default")
596     return 0;
597   for (const auto &E : llvm::AArch64::Extensions)
598     if (Name == E.Name)
599       return E.FmvPriority;
600   return 0;
601 }
602 
unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
  // Take the maximum priority as per feature cost, so more features win.
  return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
}
607 
608 bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
609   auto F = llvm::find_if(llvm::AArch64::Extensions, [&](const auto &E) {
610     return Name == E.Name && !E.DependentFeatures.empty();
611   });
612   return F != std::end(llvm::AArch64::Extensions);
613 }
614 
615 StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
616   auto F = llvm::find_if(llvm::AArch64::Extensions,
617                          [&](const auto &E) { return Name == E.Name; });
618   return F != std::end(llvm::AArch64::Extensions) ? F->DependentFeatures
619                                                   : StringRef();
620 }
621 
622 bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
623   for (const auto &E : llvm::AArch64::Extensions)
624     if (FeatureStr == E.Name)
625       return true;
626   return false;
627 }
628 
// Map a target feature name to whether it is enabled for this target.
// Several entries accept multiple spellings (e.g. "aes"/"pmull"), and the
// SVE-dependent entries additionally require SVE mode to be active.
// Unknown names report false.
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("fmv", HasFMV)
      .Cases("neon", "fp", "simd", FPU & NeonMode)
      .Case("jscvt", HasJSCVT)
      .Case("fcma", HasFCMA)
      .Case("rng", HasRandGen)
      .Case("flagm", HasFlagM)
      .Case("flagm2", HasAlternativeNZCV)
      .Case("fp16fml", HasFP16FML)
      .Case("dotprod", HasDotProd)
      .Case("sm4", HasSM4)
      .Case("rdm", HasRDM)
      .Case("lse", HasLSE)
      .Case("crc", HasCRC)
      .Case("sha2", HasSHA2)
      .Case("sha3", HasSHA3)
      .Cases("aes", "pmull", HasAES)
      .Cases("fp16", "fullfp16", HasFullFP16)
      .Case("dit", HasDIT)
      .Case("dpb", HasCCPP)
      .Case("dpb2", HasCCDP)
      .Case("rcpc", HasRCPC)
      .Case("frintts", HasFRInt3264)
      .Case("i8mm", HasMatMul)
      .Case("bf16", HasBFloat16)
      .Case("sve", FPU & SveMode)
      .Case("sve-bf16", FPU & SveMode && HasBFloat16)
      .Case("sve-i8mm", FPU & SveMode && HasMatMul)
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
      .Case("sve2", FPU & SveMode && HasSVE2)
      .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
      .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
      .Case("sme", HasSME)
      .Case("sme-f64f64", HasSMEF64F64)
      .Case("sme-i16i64", HasSMEI16I64)
      .Cases("memtag", "memtag2", HasMTE)
      .Case("sb", HasSB)
      .Case("predres", HasPredRes)
      .Cases("ssbs", "ssbs2", HasSSBS)
      .Case("bti", HasBTI)
      .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
      .Case("wfxt", HasWFxT)
      .Case("rcpc3", HasRCPC3)
      .Default(false);
}
679 
680 void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
681                                           StringRef Name, bool Enabled) const {
682   Features[Name] = Enabled;
683   // If the feature is an architecture feature (like v8.2a), add all previous
684   // architecture versions and any dependant target features.
685   const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
686       llvm::AArch64::ArchInfo::findBySubArch(Name);
687 
688   if (!ArchInfo)
689     return; // Not an architecture, nothing more to do.
690 
691   // Disabling an architecture feature does not affect dependent features
692   if (!Enabled)
693     return;
694 
695   for (const auto *OtherArch : llvm::AArch64::ArchInfos)
696     if (ArchInfo->implies(*OtherArch))
697       Features[OtherArch->getSubArch()] = true;
698 
699   // Set any features implied by the architecture
700   std::vector<StringRef> CPUFeats;
701   if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
702     for (auto F : CPUFeats) {
703       assert(F[0] == '+' && "Expected + in target feature!");
704       Features[F.drop_front(1)] = true;
705     }
706   }
707 }
708 
709 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
710                                              DiagnosticsEngine &Diags) {
711   for (const auto &Feature : Features) {
712     if (Feature == "-fp-armv8")
713       HasNoFP = true;
714     if (Feature == "-neon")
715       HasNoNeon = true;
716     if (Feature == "-sve")
717       HasNoSVE = true;
718 
719     if (Feature == "+neon" || Feature == "+fp-armv8")
720       FPU |= NeonMode;
721     if (Feature == "+jscvt") {
722       HasJSCVT = true;
723       FPU |= NeonMode;
724     }
725     if (Feature == "+fcma") {
726       HasFCMA = true;
727       FPU |= NeonMode;
728     }
729 
730     if (Feature == "+sve") {
731       FPU |= NeonMode;
732       FPU |= SveMode;
733       HasFullFP16 = true;
734     }
735     if (Feature == "+sve2") {
736       FPU |= NeonMode;
737       FPU |= SveMode;
738       HasFullFP16 = true;
739       HasSVE2 = true;
740     }
741     if (Feature == "+sve2-aes") {
742       FPU |= NeonMode;
743       FPU |= SveMode;
744       HasFullFP16 = true;
745       HasSVE2 = true;
746       HasSVE2AES = true;
747     }
748     if (Feature == "+sve2-sha3") {
749       FPU |= NeonMode;
750       FPU |= SveMode;
751       HasFullFP16 = true;
752       HasSVE2 = true;
753       HasSVE2SHA3 = true;
754     }
755     if (Feature == "+sve2-sm4") {
756       FPU |= NeonMode;
757       FPU |= SveMode;
758       HasFullFP16 = true;
759       HasSVE2 = true;
760       HasSVE2SM4 = true;
761     }
762     if (Feature == "+sve2-bitperm") {
763       FPU |= NeonMode;
764       FPU |= SveMode;
765       HasFullFP16 = true;
766       HasSVE2 = true;
767       HasSVE2BitPerm = true;
768     }
769     if (Feature == "+f32mm") {
770       FPU |= NeonMode;
771       FPU |= SveMode;
772       HasFullFP16 = true;
773       HasMatmulFP32 = true;
774     }
775     if (Feature == "+f64mm") {
776       FPU |= NeonMode;
777       FPU |= SveMode;
778       HasFullFP16 = true;
779       HasMatmulFP64 = true;
780     }
781     if (Feature == "+sme") {
782       HasSME = true;
783       HasBFloat16 = true;
784       HasFullFP16 = true;
785     }
786     if (Feature == "+sme-f64f64") {
787       HasSME = true;
788       HasSMEF64F64 = true;
789       HasBFloat16 = true;
790       HasFullFP16 = true;
791     }
792     if (Feature == "+sme-i16i64") {
793       HasSME = true;
794       HasSMEI16I64 = true;
795       HasBFloat16 = true;
796       HasFullFP16 = true;
797     }
798     if (Feature == "+sb")
799       HasSB = true;
800     if (Feature == "+predres")
801       HasPredRes = true;
802     if (Feature == "+ssbs")
803       HasSSBS = true;
804     if (Feature == "+bti")
805       HasBTI = true;
806     if (Feature == "+wfxt")
807       HasWFxT = true;
808     if (Feature == "-fmv")
809       HasFMV = false;
810     if (Feature == "+crc")
811       HasCRC = true;
812     if (Feature == "+rcpc")
813       HasRCPC = true;
814     if (Feature == "+aes") {
815       FPU |= NeonMode;
816       HasAES = true;
817     }
818     if (Feature == "+sha2") {
819       FPU |= NeonMode;
820       HasSHA2 = true;
821     }
822     if (Feature == "+sha3") {
823       FPU |= NeonMode;
824       HasSHA2 = true;
825       HasSHA3 = true;
826     }
827     if (Feature == "+rdm") {
828       FPU |= NeonMode;
829       HasRDM = true;
830     }
831     if (Feature == "+dit")
832       HasDIT = true;
833     if (Feature == "+cccp")
834       HasCCPP = true;
835     if (Feature == "+ccdp") {
836       HasCCPP = true;
837       HasCCDP = true;
838     }
839     if (Feature == "+fptoint")
840       HasFRInt3264 = true;
841     if (Feature == "+sm4") {
842       FPU |= NeonMode;
843       HasSM4 = true;
844     }
845     if (Feature == "+strict-align")
846       HasUnaligned = false;
847     // All predecessor archs are added but select the latest one for ArchKind.
848     if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
849       ArchInfo = &llvm::AArch64::ARMV8A;
850     if (Feature == "+v8.1a" &&
851         ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
852       ArchInfo = &llvm::AArch64::ARMV8_1A;
853     if (Feature == "+v8.2a" &&
854         ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
855       ArchInfo = &llvm::AArch64::ARMV8_2A;
856     if (Feature == "+v8.3a" &&
857         ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
858       ArchInfo = &llvm::AArch64::ARMV8_3A;
859     if (Feature == "+v8.4a" &&
860         ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
861       ArchInfo = &llvm::AArch64::ARMV8_4A;
862     if (Feature == "+v8.5a" &&
863         ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
864       ArchInfo = &llvm::AArch64::ARMV8_5A;
865     if (Feature == "+v8.6a" &&
866         ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
867       ArchInfo = &llvm::AArch64::ARMV8_6A;
868     if (Feature == "+v8.7a" &&
869         ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
870       ArchInfo = &llvm::AArch64::ARMV8_7A;
871     if (Feature == "+v8.8a" &&
872         ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
873       ArchInfo = &llvm::AArch64::ARMV8_8A;
874     if (Feature == "+v8.9a" &&
875         ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
876       ArchInfo = &llvm::AArch64::ARMV8_9A;
877     if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
878       ArchInfo = &llvm::AArch64::ARMV9A;
879     if (Feature == "+v9.1a" &&
880         ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
881       ArchInfo = &llvm::AArch64::ARMV9_1A;
882     if (Feature == "+v9.2a" &&
883         ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
884       ArchInfo = &llvm::AArch64::ARMV9_2A;
885     if (Feature == "+v9.3a" &&
886         ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
887       ArchInfo = &llvm::AArch64::ARMV9_3A;
888     if (Feature == "+v9.4a" &&
889         ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
890       ArchInfo = &llvm::AArch64::ARMV9_4A;
891     if (Feature == "+v8r")
892       ArchInfo = &llvm::AArch64::ARMV8R;
893     if (Feature == "+fullfp16") {
894       FPU |= NeonMode;
895       HasFullFP16 = true;
896     }
897     if (Feature == "+dotprod") {
898       FPU |= NeonMode;
899       HasDotProd = true;
900     }
901     if (Feature == "+fp16fml") {
902       FPU |= NeonMode;
903       HasFullFP16 = true;
904       HasFP16FML = true;
905     }
906     if (Feature == "+mte")
907       HasMTE = true;
908     if (Feature == "+tme")
909       HasTME = true;
910     if (Feature == "+pauth")
911       HasPAuth = true;
912     if (Feature == "+i8mm")
913       HasMatMul = true;
914     if (Feature == "+bf16")
915       HasBFloat16 = true;
916     if (Feature == "+lse")
917       HasLSE = true;
918     if (Feature == "+ls64")
919       HasLS64 = true;
920     if (Feature == "+rand")
921       HasRandGen = true;
922     if (Feature == "+flagm")
923       HasFlagM = true;
924     if (Feature == "+altnzcv") {
925       HasFlagM = true;
926       HasAlternativeNZCV = true;
927     }
928     if (Feature == "+mops")
929       HasMOPS = true;
930     if (Feature == "+d128")
931       HasD128 = true;
932     if (Feature == "+gcs")
933       HasGCS = true;
934     if (Feature == "+rcpc3")
935       HasRCPC3 = true;
936   }
937 
938   // Check features that are manually disabled by command line options.
939   // This needs to be checked after architecture-related features are handled,
940   // making sure they are properly disabled when required.
941   for (const auto &Feature : Features) {
942     if (Feature == "-d128")
943       HasD128 = false;
944   }
945 
946   setDataLayout();
947   setArchFeatures();
948 
949   if (HasNoFP) {
950     FPU &= ~FPUMode;
951     FPU &= ~NeonMode;
952     FPU &= ~SveMode;
953   }
954   if (HasNoNeon) {
955     FPU &= ~NeonMode;
956     FPU &= ~SveMode;
957   }
958   if (HasNoSVE)
959     FPU &= ~SveMode;
960 
961   return true;
962 }
963 
// Builds the final feature map for a given CPU plus an explicit feature list.
// Ordering matters: CPU-implied features first, then dependency-implied
// '+'features, then the explicit '+/-' features (so explicit '-'features can
// override anything added earlier). The combined list is handed to the
// generic TargetInfo::initFeatureMap for final resolution.
bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
  if (CpuInfo) {
    uint64_t Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
  // add target '+/-'features that can later disable some of features added on
  // the first loop. Function Multi Versioning features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if (((Feature[0] == '?' || Feature[0] == '+')) &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
      // getFeatureDependencies returns a comma-separated list of backend
      // feature names; each one is added individually.
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(AttrFeatures, ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        // Canonicalize a '+ext' spelling to its backend feature name when the
        // extension is recognised; otherwise pass the string through as-is.
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
          llvm::AArch64::parseArchExtension(Feature.substr(1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
1008 
// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
// Duplicate "arch="/"cpu="/"tune=" entries are recorded in Ret.Duplicate so
// Sema can diagnose them; unknown feature names are passed through for the
// same reason.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  // Translates a "+feat1+feat2..." suffix into backend feature names; entries
  // that are not recognised extensions are kept (as "+name"/"-name") so they
  // can be diagnosed later.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Features.push_back(FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.startswith("no"))
          Features.push_back("-" + Feature.drop_front(2).str());
        else
          Features.push_back("+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but ignored on AArch64.
    if (Feature.startswith("fpmath="))
      continue;

    if (Feature.startswith("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.startswith("arch=")) {
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      // Split "arch=armv9-a+sve2" into the architecture name and the
      // remaining "+ext" string.
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const std::optional<llvm::AArch64::ArchInfo> AI =
          llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.startswith("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.startswith("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.startswith("+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.startswith("no-")) {
      // "no-feat": negate the backend feature name (drop its leading '+'),
      // or pass the raw name through for later diagnosis.
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(Feature.split("-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
      else
        Ret.Features.push_back("-" + Feature.split("-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(FeatureName.str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  return Ret;
}
1105 
1106 bool AArch64TargetInfo::hasBFloat16Type() const {
1107   return true;
1108 }
1109 
1110 TargetInfo::CallingConvCheckResult
1111 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1112   switch (CC) {
1113   case CC_C:
1114   case CC_Swift:
1115   case CC_SwiftAsync:
1116   case CC_PreserveMost:
1117   case CC_PreserveAll:
1118   case CC_OpenCLKernel:
1119   case CC_AArch64VectorCall:
1120   case CC_AArch64SVEPCS:
1121   case CC_Win64:
1122     return CCCR_OK;
1123   default:
1124     return CCCR_Warning;
1125   }
1126 }
1127 
// A zero argument to the CLZ builtins is well-defined on AArch64.
bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1129 
// Generic AArch64 uses the AAPCS64 __va_list record type.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
1133 
// Register names accepted in GCC-style inline assembly; exposed to the rest
// of clang via getGCCRegNames() below.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0",  "pn1",  "pn2",  "pn3",  "pn4",  "pn5",  "pn6",  "pn7",  "pn8",
    "pn9",  "pn10", "pn11", "pn12", "pn13", "pn14", "pn15"
};
1173 
// Returns the list of inline-asm register names defined above.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}
1177 
// Alternative spellings accepted for registers in inline assembly, each
// mapped to its canonical name from GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1216 
// Returns the register alias table defined above.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}
1220 
1221 // Returns the length of cc constraint.
1222 static unsigned matchAsmCCConstraint(const char *Name) {
1223   constexpr unsigned len = 5;
1224   auto RV = llvm::StringSwitch<unsigned>(Name)
1225                 .Case("@cceq", len)
1226                 .Case("@ccne", len)
1227                 .Case("@cchs", len)
1228                 .Case("@cccs", len)
1229                 .Case("@cccc", len)
1230                 .Case("@cclo", len)
1231                 .Case("@ccmi", len)
1232                 .Case("@ccpl", len)
1233                 .Case("@ccvs", len)
1234                 .Case("@ccvc", len)
1235                 .Case("@cchi", len)
1236                 .Case("@ccls", len)
1237                 .Case("@ccge", len)
1238                 .Case("@cclt", len)
1239                 .Case("@ccgt", len)
1240                 .Case("@ccle", len)
1241                 .Default(0);
1242   return RV;
1243 }
1244 
// Rewrites one target constraint into the form the backend expects. On exit,
// Constraint has been advanced past any extra characters this routine
// consumed; the caller advances it by one more position itself, which is why
// the increments below are one short of the matched length.
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    // "@cc<cond>" flag-output constraints are wrapped as "{@cc<cond>}".
    if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1266 
// Classifies an inline-asm constraint, recording in Info whether it may name
// a register or a memory operand. Returns false for unrecognised constraints.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      // Consume the two extra characters of the three-letter constraint.
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
    // Deliberate fall-out of the switch: an unmatched "@..." constraint is
    // rejected below.
  }
  return false;
}
1324 
1325 bool AArch64TargetInfo::validateConstraintModifier(
1326     StringRef Constraint, char Modifier, unsigned Size,
1327     std::string &SuggestedModifier) const {
1328   // Strip off constraint modifiers.
1329   while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
1330     Constraint = Constraint.substr(1);
1331 
1332   switch (Constraint[0]) {
1333   default:
1334     return true;
1335   case 'z':
1336   case 'r': {
1337     switch (Modifier) {
1338     case 'x':
1339     case 'w':
1340       // For now assume that the person knows what they're
1341       // doing with the modifier.
1342       return true;
1343     default:
1344       // By default an 'r' constraint will be in the 'x'
1345       // registers.
1346       if (Size == 64)
1347         return true;
1348 
1349       if (Size == 512)
1350         return HasLS64;
1351 
1352       SuggestedModifier = "w";
1353       return false;
1354     }
1355   }
1356   }
1357 }
1358 
// No implicit clobber list is needed for AArch64 inline assembly.
std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1360 
1361 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1362   if (RegNo == 0)
1363     return 0;
1364   if (RegNo == 1)
1365     return 1;
1366   return -1;
1367 }
1368 
// __int128 / unsigned __int128 are available on AArch64.
bool AArch64TargetInfo::hasInt128Type() const { return true; }
1370 
// Little-endian flavour; all common configuration happens in the base class.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1374 
1375 void AArch64leTargetInfo::setDataLayout() {
1376   if (getTriple().isOSBinFormatMachO()) {
1377     if(getTriple().isArch32Bit())
1378       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
1379     else
1380       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
1381   } else
1382     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1383 }
1384 
// Little-endian targets define __AARCH64EL__ on top of the common defines.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1390 
// Big-endian flavour; all common configuration happens in the base class.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1394 
// Big-endian targets define the EB macro plus both big-endian spellings
// (note __AARCH_BIG_ENDIAN is historically spelled without "64").
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1402 
void AArch64beTargetInfo::setDataLayout() {
  // There is no big-endian Mach-O AArch64 target, hence the assert.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
1407 
// Configures the Windows-on-ARM64 type model.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // long double is plain IEEE double on Windows.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // Pointer-sized and maximum-width integer typedefs are all 64-bit.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1425 
1426 void WindowsARM64TargetInfo::setDataLayout() {
1427   resetDataLayout(Triple.isOSBinFormatMachO()
1428                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
1429                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1430                   Triple.isOSBinFormatMachO() ? "_" : "");
1431 }
1432 
// Windows on ARM64 uses a plain char* va_list, not the AAPCS64 record.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1437 
1438 TargetInfo::CallingConvCheckResult
1439 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
1440   switch (CC) {
1441   case CC_X86StdCall:
1442   case CC_X86ThisCall:
1443   case CC_X86FastCall:
1444   case CC_X86VectorCall:
1445     return CCCR_Ignore;
1446   case CC_C:
1447   case CC_OpenCLKernel:
1448   case CC_PreserveMost:
1449   case CC_PreserveAll:
1450   case CC_Swift:
1451   case CC_SwiftAsync:
1452   case CC_Win64:
1453     return CCCR_OK;
1454   default:
1455     return CCCR_Warning;
1456   }
1457 }
1458 
// MSVC-compatible environment: select the Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1464 
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  // MSVC's architecture identification macro for ARM64.
  Builder.defineMacro("_M_ARM64", "1");
}
1470 
// Always use the Microsoft Win64 convention kind, regardless of the
// requested clang ABI compatibility level.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
1475 
// Returns the minimum alignment for a global of the given bit-size,
// replicating MSVC's size-based rules on top of the inherited minimum.
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
1492 
// MinGW environment: use the generic AArch64 C++ ABI rather than Microsoft's.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1498 
// Configures the Darwin (macOS/iOS/watchOS) AArch64 type model and C++ ABI.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // On 32-bit (arm64_32) targets the widest standard integer stays 64-bit.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is plain IEEE double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 (watchOS) keeps 32-bit ARM bitfield layout rules.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1522 
// Emits the Darwin-specific predefined macros on top of the common ones.
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  // 32-bit (arm64_32) targets report the 8.32 architecture variant.
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  // Pointer-authenticating (arm64e) targets get their own macro.
  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1541 
// Darwin uses a plain char* va_list rather than the AAPCS64 record.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1546 
// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    // Force the architecture to aarch64 while preserving the rest of the
    // incoming triple (vendor, OS, environment).
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
1556 
// Marks RenderScript compilations, then adds the normal AArch64 defines.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1562