//===--- AArch64Subtarget.h - Define Subtarget for the AArch64 -*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the AArch64 specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64SUBTARGET_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64SUBTARGET_H

#include "AArch64FrameLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64SelectionDAGInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include <string>

#define GET_SUBTARGETINFO_HEADER
#include "AArch64GenSubtargetInfo.inc"

namespace llvm {
class GlobalValue;
class StringRef;
class Triple;

class AArch64Subtarget final : public AArch64GenSubtargetInfo {
public:
  enum ARMProcFamilyEnum : uint8_t {
    Others,
    A64FX,
    Ampere1,
    AppleA7,
    AppleA10,
    AppleA11,
    AppleA12,
    AppleA13,
    AppleA14,
    Carmel,
    CortexA35,
    CortexA53,
    CortexA55,
    CortexA510,
    CortexA57,
    CortexA65,
    CortexA72,
    CortexA73,
    CortexA75,
    CortexA76,
    CortexA77,
    CortexA78,
    CortexA78C,
    CortexA710,
    CortexR82,
    CortexX1,
    CortexX1C,
    CortexX2,
    ExynosM3,
    Falkor,
    Kryo,
    NeoverseE1,
    NeoverseN1,
    NeoverseN2,
    Neoverse512TVB,
    NeoverseV1,
    Saphira,
    ThunderX2T99,
    ThunderX,
    ThunderXT81,
    ThunderXT83,
    ThunderXT88,
    ThunderX3T110,
    TSV110
  };

protected:
  /// ARMProcFamily - ARM processor family: Cortex-A53, Cortex-A57, and others.
  ARMProcFamilyEnum ARMProcFamily = Others;

  bool HasV8_0aOps = false;
  bool HasV8_1aOps = false;
  bool HasV8_2aOps = false;
  bool HasV8_3aOps = false;
  bool HasV8_4aOps = false;
  bool HasV8_5aOps = false;
  bool HasV8_6aOps = false;
  bool HasV8_7aOps = false;
  bool HasV8_8aOps = false;
  bool HasV9_0aOps = false;
  bool HasV9_1aOps = false;
  bool HasV9_2aOps = false;
  bool HasV9_3aOps = false;
  bool HasV8_0rOps = false;

  bool HasCONTEXTIDREL2 = false;
  bool HasEL2VMSA = false;
  bool HasEL3 = false;
  bool HasFPARMv8 = false;
  bool HasNEON = false;
  bool HasCrypto = false;
  bool HasDotProd = false;
  bool HasCRC = false;
  bool HasLSE = false;
  bool HasLSE2 = false;
  bool HasRAS = false;
  bool HasRDM = false;
  bool HasPerfMon = false;
  bool HasFullFP16 = false;
  bool HasFP16FML = false;
  bool HasSPE = false;

  bool FixCortexA53_835769 = false;

  // ARMv8.1 extensions
  bool HasVH = false;
  bool HasPAN = false;
  bool HasLOR = false;

  // ARMv8.2 extensions
  bool HasPsUAO = false;
  bool HasPAN_RWV = false;
  bool HasCCPP = false;

  // SVE extensions
  bool HasSVE = false;
  bool UseExperimentalZeroingPseudos = false;
  bool UseScalarIncVL = false;

  // Armv8.2 Crypto extensions
  bool HasSM4 = false;
  bool HasSHA3 = false;
  bool HasSHA2 = false;
  bool HasAES = false;

  // ARMv8.3 extensions
  bool HasPAuth = false;
  bool HasJS = false;
  bool HasCCIDX = false;
  bool HasComplxNum = false;

  // ARMv8.4 extensions
  bool HasNV = false;
  bool HasMPAM = false;
  bool HasDIT = false;
  bool HasTRACEV8_4 = false;
  bool HasAM = false;
  bool HasSEL2 = false;
  bool HasTLB_RMI = false;
  bool HasFlagM = false;
  bool HasRCPC_IMMO = false;

  bool HasLSLFast = false;
  bool HasRCPC = false;
  bool HasAggressiveFMA = false;

  // Armv8.5-A Extensions
  bool HasAlternativeNZCV = false;
  bool HasFRInt3264 = false;
  bool HasSpecRestrict = false;
  bool HasSSBS = false;
  bool HasSB = false;
  bool HasPredRes = false;
  bool HasCCDP = false;
  bool HasBTI = false;
  bool HasRandGen = false;
  bool HasMTE = false;
  bool HasTME = false;

  // Armv8.6-A Extensions
  bool HasBF16 = false;
  bool HasMatMulInt8 = false;
  bool HasMatMulFP32 = false;
  bool HasMatMulFP64 = false;
  bool HasAMVS = false;
  bool HasFineGrainedTraps = false;
  bool HasEnhancedCounterVirtualization = false;

  // Armv8.7-A Extensions
  bool HasXS = false;
  bool HasWFxT = false;
  bool HasHCX = false;
  bool HasLS64 = false;

  // Armv8.8-A Extensions
  bool HasHBC = false;
  bool HasMOPS = false;

  // Arm SVE2 extensions
  bool HasSVE2 = false;
  bool HasSVE2AES = false;
  bool HasSVE2SM4 = false;
  bool HasSVE2SHA3 = false;
  bool HasSVE2BitPerm = false;

  // Armv9-A Extensions
  bool HasRME = false;

  // Arm Scalable Matrix Extension (SME)
  bool HasSME = false;
  bool HasSMEF64 = false;
  bool HasSMEI64 = false;
  bool HasStreamingSVE = false;

  // AppleA7 system register.
  bool HasAppleA7SysReg = false;

  // Future architecture extensions.
  bool HasETE = false;
  bool HasTRBE = false;
  bool HasBRBE = false;
  bool HasSPE_EEF = false;

  // HasZeroCycleRegMove - Has zero-cycle register mov instructions.
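  // (Illustrative note, not tied to any one core: on such cores a GPR-to-GPR
  // "mov x0, x1" can be handled by register renaming rather than an execution
  // unit; exactly which moves qualify is implementation-specific.)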
  bool HasZeroCycleRegMove = false;

  // HasZeroCycleZeroing - Has zero-cycle zeroing instructions.
  bool HasZeroCycleZeroing = false;
  bool HasZeroCycleZeroingGP = false;
  bool HasZeroCycleZeroingFPWorkaround = false;

  // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0",
  // as movi is more efficient across all cores. Newer cores can eliminate
  // fmovs early and there is no difference with movi, but this is not true
  // for all implementations.
  bool HasZeroCycleZeroingFP = true;

  // StrictAlign - Disallow unaligned memory accesses.
  bool StrictAlign = false;

  // NegativeImmediates - transform instructions with negative immediates.
  bool NegativeImmediates = true;

  // Enable 64-bit vectorization in SLP.
  unsigned MinVectorRegisterBitWidth = 64;

  // Do not place a BTI instruction after a call to a return-twice function
  // (such as setjmp).
  bool NoBTIAtReturnTwice = false;

  bool OutlineAtomics = false;
  bool PredictableSelectIsExpensive = false;
  bool BalanceFPOps = false;
  bool CustomAsCheapAsMove = false;
  bool ExynosAsCheapAsMove = false;
  bool UsePostRAScheduler = false;
  bool Misaligned128StoreIsSlow = false;
  bool Paired128IsSlow = false;
  bool STRQroIsSlow = false;
  bool UseAlternateSExtLoadCVTF32Pattern = false;
  bool HasArithmeticBccFusion = false;
  bool HasArithmeticCbzFusion = false;
  bool HasCmpBccFusion = false;
  bool HasFuseAddress = false;
  bool HasFuseAES = false;
  bool HasFuseArithmeticLogic = false;
  bool HasFuseCCSelect = false;
  bool HasFuseCryptoEOR = false;
  bool HasFuseLiterals = false;
  bool DisableLatencySchedHeuristic = false;
  bool UseRSqrt = false;
  bool Force32BitJumpTables = false;
  bool UseEL1ForTP = false;
  bool UseEL2ForTP = false;
  bool UseEL3ForTP = false;
  bool AllowTaggedGlobals = false;
  bool HardenSlsRetBr = false;
  bool HardenSlsBlr = false;
  bool HardenSlsNoComdat = false;
  uint8_t MaxInterleaveFactor = 2;
  uint8_t VectorInsertExtractBaseCost = 3;
  uint16_t CacheLineSize = 0;
  uint16_t PrefetchDistance = 0;
  uint16_t MinPrefetchStride = 1;
  unsigned MaxPrefetchIterationsAhead = UINT_MAX;
  unsigned PrefFunctionLogAlignment = 0;
  unsigned PrefLoopLogAlignment = 0;
  unsigned MaxBytesForLoopAlignment = 0;
  unsigned MaxJumpTableSize = 0;
  unsigned WideningBaseCost = 0;

  // ReserveXRegister[i] - X#i is not available as a general purpose register.
  BitVector ReserveXRegister;

  // CustomCallSavedXRegs[i] - X#i is callee saved.
  BitVector CustomCallSavedXRegs;

  bool IsLittle;

  unsigned MinSVEVectorSizeInBits;
  unsigned MaxSVEVectorSizeInBits;
  unsigned VScaleForTuning = 2;

  /// TargetTriple - What processor and OS we're targeting.
  Triple TargetTriple;

  AArch64FrameLowering FrameLowering;
  AArch64InstrInfo InstrInfo;
  AArch64SelectionDAGInfo TSInfo;
  AArch64TargetLowering TLInfo;

  /// GlobalISel related APIs.
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<InlineAsmLowering> InlineAsmLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;

private:
  /// initializeSubtargetDependencies - Initializes using CPUString and the
  /// passed in feature string so that we can use initializer lists for
  /// subtarget initialization.
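  ///
  /// Sketch of the intended member-initializer-list pattern (illustrative
  /// only; the authoritative definition lives in the corresponding .cpp file):
  ///   InstrInfo(initializeSubtargetDependencies(FS, CPU, TuneCPU)),
  ///   TLInfo(TM, *this)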
  AArch64Subtarget &initializeSubtargetDependencies(StringRef FS,
                                                    StringRef CPUString,
                                                    StringRef TuneCPUString);

  /// Initialize properties based on the selected processor family.
  void initializeProperties();

public:
  /// This constructor initializes the data members to match that
  /// of the specified triple.
  AArch64Subtarget(const Triple &TT, const std::string &CPU,
                   const std::string &TuneCPU, const std::string &FS,
                   const TargetMachine &TM, bool LittleEndian,
                   unsigned MinSVEVectorSizeInBitsOverride = 0,
                   unsigned MaxSVEVectorSizeInBitsOverride = 0);

  const AArch64SelectionDAGInfo *getSelectionDAGInfo() const override {
    return &TSInfo;
  }
  const AArch64FrameLowering *getFrameLowering() const override {
    return &FrameLowering;
  }
  const AArch64TargetLowering *getTargetLowering() const override {
    return &TLInfo;
  }
  const AArch64InstrInfo *getInstrInfo() const override { return &InstrInfo; }
  const AArch64RegisterInfo *getRegisterInfo() const override {
    return &getInstrInfo()->getRegisterInfo();
  }
  const CallLowering *getCallLowering() const override;
  const InlineAsmLowering *getInlineAsmLowering() const override;
  InstructionSelector *getInstructionSelector() const override;
  const LegalizerInfo *getLegalizerInfo() const override;
  const RegisterBankInfo *getRegBankInfo() const override;
  const Triple &getTargetTriple() const { return TargetTriple; }
  bool enableMachineScheduler() const override { return true; }
  bool enablePostRAScheduler() const override {
    return UsePostRAScheduler;
  }

  /// Returns ARM processor family.
  /// Avoid this function! CPU specifics should be kept local to this class
  /// and preferably modeled with SubtargetFeatures or properties in
  /// initializeProperties().
  ARMProcFamilyEnum getProcFamily() const {
    return ARMProcFamily;
  }

  bool hasV8_0aOps() const { return HasV8_0aOps; }
  bool hasV8_1aOps() const { return HasV8_1aOps; }
  bool hasV8_2aOps() const { return HasV8_2aOps; }
  bool hasV8_3aOps() const { return HasV8_3aOps; }
  bool hasV8_4aOps() const { return HasV8_4aOps; }
  bool hasV8_5aOps() const { return HasV8_5aOps; }
  bool hasV9_0aOps() const { return HasV9_0aOps; }
  bool hasV9_1aOps() const { return HasV9_1aOps; }
  bool hasV9_2aOps() const { return HasV9_2aOps; }
  bool hasV9_3aOps() const { return HasV9_3aOps; }
  bool hasV8_0rOps() const { return HasV8_0rOps; }

  bool hasZeroCycleRegMove() const { return HasZeroCycleRegMove; }

  bool hasZeroCycleZeroingGP() const { return HasZeroCycleZeroingGP; }

  bool hasZeroCycleZeroingFP() const { return HasZeroCycleZeroingFP; }

  bool hasZeroCycleZeroingFPWorkaround() const {
    return HasZeroCycleZeroingFPWorkaround;
  }

  bool requiresStrictAlign() const { return StrictAlign; }

  bool isXRaySupported() const override { return true; }

  unsigned getMinVectorRegisterBitWidth() const {
    return MinVectorRegisterBitWidth;
  }

  bool isXRegisterReserved(size_t i) const { return ReserveXRegister[i]; }
  unsigned getNumXRegisterReserved() const { return ReserveXRegister.count(); }
  bool isXRegCustomCalleeSaved(size_t i) const {
    return CustomCallSavedXRegs[i];
  }
  bool hasCustomCallingConv() const { return CustomCallSavedXRegs.any(); }
  bool hasFPARMv8() const { return HasFPARMv8; }
  bool hasNEON() const { return HasNEON; }
  bool hasCrypto() const { return HasCrypto; }
  bool hasDotProd() const { return HasDotProd; }
  bool hasCRC() const { return HasCRC; }
  bool hasLSE() const { return HasLSE; }
  bool hasLSE2() const { return HasLSE2; }
  bool hasRAS() const { return HasRAS; }
  bool hasRDM() const { return HasRDM; }
  bool hasSM4() const { return HasSM4; }
  bool hasSHA3() const { return HasSHA3; }
  bool hasSHA2() const { return HasSHA2; }
  bool hasAES() const { return HasAES; }
  bool hasCONTEXTIDREL2() const { return HasCONTEXTIDREL2; }
  bool balanceFPOps() const { return BalanceFPOps; }
  bool predictableSelectIsExpensive() const {
    return PredictableSelectIsExpensive;
  }
  bool hasCustomCheapAsMoveHandling() const { return CustomAsCheapAsMove; }
  bool hasExynosCheapAsMoveHandling() const { return ExynosAsCheapAsMove; }
  bool isMisaligned128StoreSlow() const { return Misaligned128StoreIsSlow; }
  bool isPaired128Slow() const { return Paired128IsSlow; }
  bool isSTRQroSlow() const { return STRQroIsSlow; }
  bool useAlternateSExtLoadCVTF32Pattern() const {
    return UseAlternateSExtLoadCVTF32Pattern;
  }
  bool hasArithmeticBccFusion() const { return HasArithmeticBccFusion; }
  bool hasArithmeticCbzFusion() const { return HasArithmeticCbzFusion; }
  bool hasCmpBccFusion() const { return HasCmpBccFusion; }
  bool hasFuseAddress() const { return HasFuseAddress; }
  bool hasFuseAES() const { return HasFuseAES; }
  bool hasFuseArithmeticLogic() const { return HasFuseArithmeticLogic; }
  bool hasFuseCCSelect() const { return HasFuseCCSelect; }
  bool hasFuseCryptoEOR() const { return HasFuseCryptoEOR; }
  bool hasFuseLiterals() const { return HasFuseLiterals; }

  /// Return true if the CPU supports any kind of instruction fusion.
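  /// "Fusion" here refers to macro-op fusion: the scheduler keeps certain
  /// instruction pairs adjacent so the core can combine them. For example,
  /// when hasFuseAES() is true, AESE/AESMC (and AESD/AESIMC) pairs are kept
  /// back to back.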
  bool hasFusion() const {
    return hasArithmeticBccFusion() || hasArithmeticCbzFusion() ||
           hasFuseAES() || hasFuseArithmeticLogic() ||
           hasFuseCCSelect() || hasFuseLiterals();
  }

  bool hardenSlsRetBr() const { return HardenSlsRetBr; }
  bool hardenSlsBlr() const { return HardenSlsBlr; }
  bool hardenSlsNoComdat() const { return HardenSlsNoComdat; }

  bool useEL1ForTP() const { return UseEL1ForTP; }
  bool useEL2ForTP() const { return UseEL2ForTP; }
  bool useEL3ForTP() const { return UseEL3ForTP; }

  bool useRSqrt() const { return UseRSqrt; }
  bool force32BitJumpTables() const { return Force32BitJumpTables; }
  unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
  unsigned getVectorInsertExtractBaseCost() const {
    return VectorInsertExtractBaseCost;
  }
  unsigned getCacheLineSize() const override { return CacheLineSize; }
  unsigned getPrefetchDistance() const override { return PrefetchDistance; }
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) const override {
    return MinPrefetchStride;
  }
  unsigned getMaxPrefetchIterationsAhead() const override {
    return MaxPrefetchIterationsAhead;
  }
  unsigned getPrefFunctionLogAlignment() const {
    return PrefFunctionLogAlignment;
  }
  unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }

  unsigned getMaxBytesForLoopAlignment() const {
    return MaxBytesForLoopAlignment;
  }

  unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; }

  unsigned getWideningBaseCost() const { return WideningBaseCost; }

  bool useExperimentalZeroingPseudos() const {
    return UseExperimentalZeroingPseudos;
  }

  bool useScalarIncVL() const { return UseScalarIncVL; }

  /// CPU has TBI (top byte of addresses is ignored during HW address
  /// translation) and OS enables it.
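  /// With TBI, bits [63:56] of a pointer are ignored by loads and stores, so
  /// a tagged address such as 0xAB00'0000'1234'5678 and its untagged form
  /// refer to the same memory location (illustrative example).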
  bool supportsAddressTopByteIgnored() const;

  bool hasPerfMon() const { return HasPerfMon; }
  bool hasFullFP16() const { return HasFullFP16; }
  bool hasFP16FML() const { return HasFP16FML; }
  bool hasSPE() const { return HasSPE; }
  bool hasLSLFast() const { return HasLSLFast; }
  bool hasSVE() const { return HasSVE; }
  bool hasSVE2() const { return HasSVE2; }
  bool hasRCPC() const { return HasRCPC; }
  bool hasAggressiveFMA() const { return HasAggressiveFMA; }
  bool hasAlternativeNZCV() const { return HasAlternativeNZCV; }
  bool hasFRInt3264() const { return HasFRInt3264; }
  bool hasSpecRestrict() const { return HasSpecRestrict; }
  bool hasSSBS() const { return HasSSBS; }
  bool hasSB() const { return HasSB; }
  bool hasPredRes() const { return HasPredRes; }
  bool hasCCDP() const { return HasCCDP; }
  bool hasBTI() const { return HasBTI; }
  bool hasRandGen() const { return HasRandGen; }
  bool hasMTE() const { return HasMTE; }
  bool hasTME() const { return HasTME; }
  // Arm SVE2 extensions
  bool hasSVE2AES() const { return HasSVE2AES; }
  bool hasSVE2SM4() const { return HasSVE2SM4; }
  bool hasSVE2SHA3() const { return HasSVE2SHA3; }
  bool hasSVE2BitPerm() const { return HasSVE2BitPerm; }
  bool hasMatMulInt8() const { return HasMatMulInt8; }
  bool hasMatMulFP32() const { return HasMatMulFP32; }
  bool hasMatMulFP64() const { return HasMatMulFP64; }

  // Armv8.6-A Extensions
  bool hasBF16() const { return HasBF16; }
  bool hasFineGrainedTraps() const { return HasFineGrainedTraps; }
  bool hasEnhancedCounterVirtualization() const {
    return HasEnhancedCounterVirtualization;
  }

  // Arm Scalable Matrix Extension (SME)
  bool hasSME() const { return HasSME; }
  bool hasSMEF64() const { return HasSMEF64; }
  bool hasSMEI64() const { return HasSMEI64; }
  bool hasStreamingSVE() const { return HasStreamingSVE; }

  bool isLittleEndian() const { return IsLittle; }

  bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
  bool isTargetIOS() const { return TargetTriple.isiOS(); }
  bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
  bool isTargetWindows() const { return TargetTriple.isOSWindows(); }
  bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
  bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }

  bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
  bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }

  bool isTargetILP32() const {
    return TargetTriple.isArch32Bit() ||
           TargetTriple.getEnvironment() == Triple::GNUILP32;
  }

  bool useAA() const override;

  bool outlineAtomics() const { return OutlineAtomics; }

  bool hasVH() const { return HasVH; }
  bool hasPAN() const { return HasPAN; }
  bool hasLOR() const { return HasLOR; }

  bool hasPsUAO() const { return HasPsUAO; }
  bool hasPAN_RWV() const { return HasPAN_RWV; }
  bool hasCCPP() const { return HasCCPP; }

  bool hasPAuth() const { return HasPAuth; }
  bool hasJS() const { return HasJS; }
  bool hasCCIDX() const { return HasCCIDX; }
  bool hasComplxNum() const { return HasComplxNum; }

  bool hasNV() const { return HasNV; }
  bool hasMPAM() const { return HasMPAM; }
  bool hasDIT() const { return HasDIT; }
  bool hasTRACEV8_4() const { return HasTRACEV8_4; }
  bool hasAM() const { return HasAM; }
  bool hasAMVS() const { return HasAMVS; }
  bool hasXS() const { return HasXS; }
  bool hasWFxT() const { return HasWFxT; }
  bool hasHCX() const { return HasHCX; }
  bool hasLS64() const { return HasLS64; }
  bool hasSEL2() const { return HasSEL2; }
  bool hasTLB_RMI() const { return HasTLB_RMI; }
  bool hasFlagM() const { return HasFlagM; }
  bool hasRCPC_IMMO() const { return HasRCPC_IMMO; }
  bool hasEL2VMSA() const { return HasEL2VMSA; }
  bool hasEL3() const { return HasEL3; }
  bool hasHBC() const { return HasHBC; }
  bool hasMOPS() const { return HasMOPS; }

  bool fixCortexA53_835769() const { return FixCortexA53_835769; }

  bool noBTIAtReturnTwice() const { return NoBTIAtReturnTwice; }

  bool addrSinkUsingGEPs() const override {
    // Keeping GEPs inbounds is important for exploiting AArch64
    // addressing-modes in ILP32 mode.
    return useAA() || isTargetILP32();
  }

  bool useSmallAddressing() const {
    switch (TLInfo.getTargetMachine().getCodeModel()) {
    case CodeModel::Kernel:
      // Kernel is currently allowed only for Fuchsia targets,
      // where it is the same as Small for almost all purposes.
    case CodeModel::Small:
      return true;
    default:
      return false;
    }
  }

  /// ParseSubtargetFeatures - Parses features string setting specified
  /// subtarget options. Definition of function is auto generated by tblgen.
  void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);

  /// ClassifyGlobalReference - Find the target operand flags that describe
  /// how a global value should be referenced for the current subtarget.
  unsigned ClassifyGlobalReference(const GlobalValue *GV,
                                   const TargetMachine &TM) const;

  unsigned classifyGlobalFunctionReference(const GlobalValue *GV,
                                           const TargetMachine &TM) const;

  void overrideSchedPolicy(MachineSchedPolicy &Policy,
                           unsigned NumRegionInstrs) const override;

  bool enableEarlyIfConversion() const override;

  bool enableAdvancedRASplitCost() const override { return false; }

  std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const override;

  bool isCallingConvWin64(CallingConv::ID CC) const {
    switch (CC) {
    case CallingConv::C:
    case CallingConv::Fast:
    case CallingConv::Swift:
      return isTargetWindows();
    case CallingConv::Win64:
      return true;
    default:
      return false;
    }
  }

  /// Return whether FrameLowering should always set the "extended frame
  /// present" bit in FP, or set it based on a symbol in the runtime.
  bool swiftAsyncContextIsDynamicallySet() const {
    // Older OS versions (particularly system unwinders) are confused by the
    // Swift extended frame, so when building code that might be run on them we
    // must dynamically query the concurrency library to determine whether
    // extended frames should be flagged as present.
    const Triple &TT = getTargetTriple();

    unsigned Major = TT.getOSVersion().getMajor();
    switch (TT.getOS()) {
    default:
      return false;
    case Triple::IOS:
    case Triple::TvOS:
      return Major < 15;
    case Triple::WatchOS:
      return Major < 8;
    case Triple::MacOSX:
    case Triple::Darwin:
      return Major < 12;
    }
  }

  void mirFileLoaded(MachineFunction &MF) const override;

  // Return the known range for the bit length of SVE data registers. A value
  // of 0 means nothing is known about that particular limit beyond what's
  // implied by the architecture.
  unsigned getMaxSVEVectorSizeInBits() const {
    assert(HasSVE && "Tried to get SVE vector length without SVE support!");
    return MaxSVEVectorSizeInBits;
  }

  unsigned getMinSVEVectorSizeInBits() const {
    assert(HasSVE && "Tried to get SVE vector length without SVE support!");
    return MinSVEVectorSizeInBits;
  }

  bool useSVEForFixedLengthVectors() const {
    // Prefer NEON unless larger SVE registers are available.
    return hasSVE() && getMinSVEVectorSizeInBits() >= 256;
  }

  unsigned getVScaleForTuning() const { return VScaleForTuning; }
};
} // End llvm namespace

#endif