//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements AArch64 TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/AArch64TargetParser.h"

using namespace clang;
using namespace clang::targets;

const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};

static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
  switch (Kind) {
  case llvm::AArch64::ArchKind::ARMV9A:
  case llvm::AArch64::ArchKind::ARMV9_1A:
  case llvm::AArch64::ArchKind::ARMV9_2A:
  case llvm::AArch64::ArchKind::ARMV9_3A:
    return "9";
  default:
    return "8";
  }
}
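
// Editorial note (not in the upstream source): only the four Armv9 arch
// kinds report a major architecture version of 9; every Armv8.x kind,
// including ARMV8R, falls through to "8". So, for example, -march=armv8.5-a
// predefines __ARM_ARCH to 8, while -march=armv9-a predefines it to 9.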

StringRef AArch64TargetInfo::getArchProfile() const {
  switch (ArchKind) {
  case llvm::AArch64::ArchKind::ARMV8R:
    return "R";
  default:
    return "A";
  }
}

AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal
  // type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
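
// Illustrative sketch (an addition, assuming none of the subclass overrides
// below apply): a generic 64-bit aarch64-linux target built by this
// constructor presents an LP64 model with unsigned wchar_t and a
// quad-precision long double, i.e. user code would observe:
//
//   static_assert(sizeof(long) == 8);          // Int64Type = SignedLong
//   static_assert(sizeof(void *) == 8);        // PointerWidth = 64
//   static_assert(sizeof(long double) == 16);  // IEEEquad
//
// The Darwin and Windows subclasses later in this file override several of
// these defaults.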

StringRef AArch64TargetInfo::getABI() const { return ABI; }

bool AArch64TargetInfo::setABI(const std::string &Name) {
  if (Name != "aapcs" && Name != "darwinpcs")
    return false;

  ABI = Name;
  return true;
}

bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
                                                 BranchProtectionInfo &BPI,
                                                 StringRef &Err) const {
  llvm::ARM::ParsedBranchProtection PBP;
  if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
    return false;

  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  return true;
}
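
// Illustrative mapping (an addition, not upstream code; the exact parser
// output is an assumption): for -mbranch-protection=pac-ret+leaf+b-key+bti
// the parser is expected to produce Scope "all" and Key "b_key", so the code
// above yields SignReturnAddr = All, SignKey = BKey and
// BranchTargetEnforcement = true; a bare "pac-ret" instead yields the
// NonLeaf scope with the a_key default.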

bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
  return Name == "generic" ||
         llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
}

bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}

void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}

void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
  Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
  Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}

void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
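
// Note (an addition for clarity): each getTargetDefinesARMV8xA hook chains
// to its predecessor, so selecting e.g. Armv8.5-A also emits the
// 8.4/8.3/8.2/8.1 macros, and each Armv9.x entry point reuses the
// corresponding Armv8.(x+5) chain, mirroring how the Armv9 extensions build
// on Armv8.5.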

void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + getArchProfile() + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xE implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // __ARM_FEATURE_CRYPTO is deprecated in favor of the finer-grained feature
  // macros for AES, SHA2, SHA3 and SM4.
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }
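
  // Illustrative values (an addition, not upstream code) for the bitmask
  // built above:
  //   -mbranch-protection=pac-ret            -> __ARM_FEATURE_PAC_DEFAULT 1
  //   -mbranch-protection=pac-ret+leaf       -> __ARM_FEATURE_PAC_DEFAULT 5
  //   -mbranch-protection=pac-ret+leaf+b-key -> __ARM_FEATURE_PAC_DEFAULT 6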

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_7A:
    getTargetDefinesARMV87A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_8A:
    getTargetDefinesARMV88A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9A:
    getTargetDefinesARMV9A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_1A:
    getTargetDefinesARMV91A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_2A:
    getTargetDefinesARMV92A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_3A:
    getTargetDefinesARMV93A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // Allow detection of fast FMA support.
  Builder.defineMacro("__FP_FAST_FMA", "1");
  Builder.defineMacro("__FP_FAST_FMAF", "1");

  // C/C++ operators work on both VLS and VLA SVE types
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");

  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
  }
}
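
// Example (an addition, not upstream code): with -msve-vector-bits=512 the
// driver sets VScaleMin == VScaleMax == 4, so the check above defines
// __ARM_FEATURE_SVE_BITS to 512 (4 * 128), which in turn enables the ACLE
// fixed-length SVE attribute in user code:
//
//   typedef svint32_t fixed_i32 __attribute__((arm_sve_vector_bits(512)));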

ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}

Optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
  if (LangOpts.VScaleMin || LangOpts.VScaleMax)
    return std::pair<unsigned, unsigned>(
        LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);

  if (hasFeature("sve"))
    return std::pair<unsigned, unsigned>(1, 16);

  return None;
}
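
// Illustrative results (an addition): with -msve-vector-bits=256 the driver
// sets VScaleMin = VScaleMax = 2, so this returns the exact range (2, 2);
// with only +sve enabled it returns (1, 16), the architectural limits for
// SVE (128-bit granules up to 2048 bits).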

bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("neon", FPU & NeonMode)
      .Cases("sve", "sve2", "sve2-bitperm", "sve2-aes", "sve2-sha3",
             "sve2-sm4", "f64mm", "f32mm", "i8mm", "bf16", FPU & SveMode)
      .Case("ls64", HasLS64)
      .Default(false);
}
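
// Usage sketch (an addition): callers query the same strings as the
// "+feature" names handled below, e.g.
//
//   if (TI.hasFeature("sve2-aes")) { /* ... */ }
//
// Note that all of the SVE-adjacent names above are folded into a single
// SveMode test rather than checked individually.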

bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  FPU = FPUMode;
  HasCRC = false;
  HasAES = false;
  HasSHA2 = false;
  HasSHA3 = false;
  HasSM4 = false;
  HasUnaligned = true;
  HasFullFP16 = false;
  HasDotProd = false;
  HasFP16FML = false;
  HasMTE = false;
  HasTME = false;
  HasLS64 = false;
  HasRandGen = false;
  HasMatMul = false;
  HasBFloat16 = false;
  HasSVE2 = false;
  HasSVE2AES = false;
  HasSVE2SHA3 = false;
  HasSVE2SM4 = false;
  HasSVE2BitPerm = false;
  HasMatmulFP64 = false;
  HasMatmulFP32 = false;
  HasLSE = false;
  HasMOPS = false;

  ArchKind = llvm::AArch64::ArchKind::INVALID;

  for (const auto &Feature : Features) {
    if (Feature == "+neon")
      FPU |= NeonMode;
    if (Feature == "+sve") {
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2-aes") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2AES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2BitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= SveMode;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= SveMode;
      HasMatmulFP64 = true;
    }
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+aes")
      HasAES = true;
    if (Feature == "+sha2")
      HasSHA2 = true;
    if (Feature == "+sha3") {
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+sm4")
      HasSM4 = true;
    if (Feature == "+strict-align")
      HasUnaligned = false;
    if (Feature == "+v8a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8A;
    if (Feature == "+v8.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
    if (Feature == "+v8.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
    if (Feature == "+v8.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
    if (Feature == "+v8.4a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
    if (Feature == "+v8.5a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
    if (Feature == "+v8.6a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
    if (Feature == "+v8.7a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
    if (Feature == "+v8.8a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_8A;
    if (Feature == "+v9a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9A;
    if (Feature == "+v9.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
    if (Feature == "+v9.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
    if (Feature == "+v9.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_3A;
    if (Feature == "+v8r")
      ArchKind = llvm::AArch64::ArchKind::ARMV8R;
    if (Feature == "+fullfp16")
      HasFullFP16 = true;
    if (Feature == "+dotprod")
      HasDotProd = true;
    if (Feature == "+fp16fml")
      HasFP16FML = true;
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+mops")
      HasMOPS = true;
  }

  setDataLayout();

  return true;
}
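
// Illustrative input (an addition; the exact feature strings the driver
// passes are an assumption): for -march=armv8.2-a+fp16 the list handled
// above would contain entries such as {"+v8.2a", "+neon", "+fullfp16"}, so
// the loop leaves FPU with NeonMode set, ArchKind at ARMV8_2A and
// HasFullFP16 true before setDataLayout() runs.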

TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }

TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}

const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
717     "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
718     "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
719     "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
720 
721     // Neon vector registers
722     "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
723     "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
724     "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
725 
726     // SVE vector registers
727     "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
728     "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
729     "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",
730 
731     // SVE predicate registers
732     "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
733     "p11", "p12", "p13", "p14", "p15"
734 };
735 
736 ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
737   return llvm::makeArrayRef(GCCRegNames);
738 }
739 
740 const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
741     {{"w31"}, "wsp"},
742     {{"x31"}, "sp"},
743     // GCC rN registers are aliases of xN registers.
744     {{"r0"}, "x0"},
745     {{"r1"}, "x1"},
746     {{"r2"}, "x2"},
747     {{"r3"}, "x3"},
748     {{"r4"}, "x4"},
749     {{"r5"}, "x5"},
750     {{"r6"}, "x6"},
751     {{"r7"}, "x7"},
752     {{"r8"}, "x8"},
753     {{"r9"}, "x9"},
754     {{"r10"}, "x10"},
755     {{"r11"}, "x11"},
756     {{"r12"}, "x12"},
757     {{"r13"}, "x13"},
758     {{"r14"}, "x14"},
759     {{"r15"}, "x15"},
760     {{"r16"}, "x16"},
761     {{"r17"}, "x17"},
762     {{"r18"}, "x18"},
763     {{"r19"}, "x19"},
764     {{"r20"}, "x20"},
765     {{"r21"}, "x21"},
766     {{"r22"}, "x22"},
767     {{"r23"}, "x23"},
768     {{"r24"}, "x24"},
769     {{"r25"}, "x25"},
770     {{"r26"}, "x26"},
771     {{"r27"}, "x27"},
772     {{"r28"}, "x28"},
773     {{"r29", "x29"}, "fp"},
774     {{"r30", "x30"}, "lr"},
775     // The S/D/Q and W/X registers overlap, but aren't really aliases; we
776     // don't want to substitute one of these for a different-sized one.
777 };
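
// Usage note (an addition): these aliases let GCC-style register variables
// and asm operands use ARM's legacy rN spellings, e.g. (illustrative only):
//
//   register long tmp asm("r19");  // resolves to x19 via the alias table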

ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}

bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-P15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
  return false;
}
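
// Usage sketch (an addition; the variables a and b are hypothetical doubles
// in scope): the 'w' constraint accepted above covers the full V0-V31
// FP/SIMD file, e.g.
//
//   double r;
//   asm("fadd %d0, %d1, %d2" : "=w"(r) : "w"(a), "w"(b));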

bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
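
// Illustrative consequence (an addition): binding a 32-bit value to a plain
// "r" constraint without a 'w' or 'x' modifier fails the check above, and
// the caller can use SuggestedModifier to hint at the fix, e.g. writing
// "%w0" instead of "%0" for an int operand.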

const char *AArch64TargetInfo::getClobbers() const { return ""; }

int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
  if (RegNo == 0)
    return 0;
  if (RegNo == 1)
    return 1;
  return -1;
}

bool AArch64TargetInfo::hasInt128Type() const { return true; }

AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64leTargetInfo::setDataLayout() {
  if (getTriple().isOSBinFormatMachO()) {
    if (getTriple().isArch32Bit())
      resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
    else
      resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
  } else
    resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
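
// Illustrative sketch (an addition): under this LLP64 model user code
// observes
//
//   static_assert(sizeof(long) == 4);         // LongWidth = 32
//   static_assert(sizeof(long long) == 8);    // Int64Type = SignedLongLong
//   static_assert(sizeof(long double) == 8);  // IEEEdouble
//
// which matches MSVC on ARM64 and differs from the AAPCS64 defaults set in
// the base class.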

void WindowsARM64TargetInfo::setDataLayout() {
  resetDataLayout(Triple.isOSBinFormatMachO()
                      ? "e-m:o-i64:64-i128:128-n32:64-S128"
                      : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
                  Triple.isOSBinFormatMachO() ? "_" : "");
}

TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}

void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}

TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}

unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size-based alignment on arm64, following the alignment section
  // of the document below; replicate that to keep alignment consistent with
  // object files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
992   if (TypeSize >= 512) {              // TypeSize >= 64 bytes
993     Align = std::max(Align, 128u);    // align type at least 16 bytes
994   } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
995     Align = std::max(Align, 64u);     // align type at least 8 butes
996   } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
997     Align = std::max(Align, 32u);     // align type at least 4 bytes
998   }
999   return Align;
1000 }
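
// Worked example (an addition): a 64-byte global (TypeSize == 512 bits)
// takes the first branch above and is aligned to at least 128 bits
// (16 bytes); a 16-byte global (128 bits) falls into the >= 64 case and
// gets at least 8-byte alignment.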

MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}

DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}

void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}

TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}

void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1072