//===--- AArch64Subtarget.h - Define Subtarget for the AArch64 -*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the AArch64 specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64SUBTARGET_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64SUBTARGET_H

#include "AArch64FrameLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64SelectionDAGInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include <string>

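// GET_SUBTARGETINFO_HEADER selects the class-declaration portion of the
// TableGen-generated AArch64GenSubtargetInfo.inc, which supplies the
// AArch64GenSubtargetInfo base class used below.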
#define GET_SUBTARGETINFO_HEADER
#include "AArch64GenSubtargetInfo.inc"

namespace llvm {
class GlobalValue;
class StringRef;
class Triple;

class AArch64Subtarget final : public AArch64GenSubtargetInfo {
public:
  enum ARMProcFamilyEnum : uint8_t {
    Others,
    A64FX,
    AppleA7,
    AppleA10,
    AppleA11,
    AppleA12,
    AppleA13,
    AppleA14,
    Carmel,
    CortexA35,
    CortexA53,
    CortexA55,
    CortexA510,
    CortexA57,
    CortexA65,
    CortexA72,
    CortexA73,
    CortexA75,
    CortexA76,
    CortexA77,
    CortexA78,
    CortexA78C,
    CortexA710,
    CortexR82,
    CortexX1,
    CortexX1C,
    CortexX2,
    ExynosM3,
    Falkor,
    Kryo,
    NeoverseE1,
    NeoverseN1,
    NeoverseN2,
    Neoverse512TVB,
    NeoverseV1,
    Saphira,
    ThunderX2T99,
    ThunderX,
    ThunderXT81,
    ThunderXT83,
    ThunderXT88,
    ThunderX3T110,
    TSV110
  };

protected:
  /// ARMProcFamily - ARM processor family: Cortex-A53, Cortex-A57, and others.
  ARMProcFamilyEnum ARMProcFamily = Others;

  bool HasV8_0aOps = false;
  bool HasV8_1aOps = false;
  bool HasV8_2aOps = false;
  bool HasV8_3aOps = false;
  bool HasV8_4aOps = false;
  bool HasV8_5aOps = false;
  bool HasV8_6aOps = false;
  bool HasV8_7aOps = false;
  bool HasV8_8aOps = false;
  bool HasV9_0aOps = false;
  bool HasV9_1aOps = false;
  bool HasV9_2aOps = false;
  bool HasV9_3aOps = false;
  bool HasV8_0rOps = false;

  bool HasCONTEXTIDREL2 = false;
  bool HasEL2VMSA = false;
  bool HasEL3 = false;
  bool HasFPARMv8 = false;
  bool HasNEON = false;
  bool HasCrypto = false;
  bool HasDotProd = false;
  bool HasCRC = false;
  bool HasLSE = false;
  bool HasLSE2 = false;
  bool HasRAS = false;
  bool HasRDM = false;
  bool HasPerfMon = false;
  bool HasFullFP16 = false;
  bool HasFP16FML = false;
  bool HasSPE = false;

  bool FixCortexA53_835769 = false;

  // ARMv8.1 extensions
  bool HasVH = false;
  bool HasPAN = false;
  bool HasLOR = false;

  // ARMv8.2 extensions
  bool HasPsUAO = false;
  bool HasPAN_RWV = false;
  bool HasCCPP = false;

  // SVE extensions
  bool HasSVE = false;
  bool UseExperimentalZeroingPseudos = false;
  bool UseScalarIncVL = false;

  // Armv8.2 Crypto extensions
  bool HasSM4 = false;
  bool HasSHA3 = false;
  bool HasSHA2 = false;
  bool HasAES = false;

  // ARMv8.3 extensions
  bool HasPAuth = false;
  bool HasJS = false;
  bool HasCCIDX = false;
  bool HasComplxNum = false;

  // ARMv8.4 extensions
  bool HasNV = false;
  bool HasMPAM = false;
  bool HasDIT = false;
  bool HasTRACEV8_4 = false;
  bool HasAM = false;
  bool HasSEL2 = false;
  bool HasTLB_RMI = false;
  bool HasFlagM = false;
  bool HasRCPC_IMMO = false;

  bool HasLSLFast = false;
  bool HasRCPC = false;
  bool HasAggressiveFMA = false;

  // Armv8.5-A Extensions
  bool HasAlternativeNZCV = false;
  bool HasFRInt3264 = false;
  bool HasSpecRestrict = false;
  bool HasSSBS = false;
  bool HasSB = false;
  bool HasPredRes = false;
  bool HasCCDP = false;
  bool HasBTI = false;
  bool HasRandGen = false;
  bool HasMTE = false;
  bool HasTME = false;

  // Armv8.6-A Extensions
  bool HasBF16 = false;
  bool HasMatMulInt8 = false;
  bool HasMatMulFP32 = false;
  bool HasMatMulFP64 = false;
  bool HasAMVS = false;
  bool HasFineGrainedTraps = false;
  bool HasEnhancedCounterVirtualization = false;

  // Armv8.7-A Extensions
  bool HasXS = false;
  bool HasWFxT = false;
  bool HasHCX = false;
  bool HasLS64 = false;

  // Armv8.8-A Extensions
  bool HasHBC = false;
  bool HasMOPS = false;

  // Arm SVE2 extensions
  bool HasSVE2 = false;
  bool HasSVE2AES = false;
  bool HasSVE2SM4 = false;
  bool HasSVE2SHA3 = false;
  bool HasSVE2BitPerm = false;

  // Armv9-A Extensions
  bool HasRME = false;

  // Arm Scalable Matrix Extension (SME)
  bool HasSME = false;
  bool HasSMEF64 = false;
  bool HasSMEI64 = false;
  bool HasStreamingSVE = false;

  // AppleA7 system register.
  bool HasAppleA7SysReg = false;

  // Future architecture extensions.
  bool HasETE = false;
  bool HasTRBE = false;
  bool HasBRBE = false;
  bool HasSPE_EEF = false;

  // HasZeroCycleRegMove - Has zero-cycle register mov instructions.
  bool HasZeroCycleRegMove = false;

  // HasZeroCycleZeroing - Has zero-cycle zeroing instructions.
  bool HasZeroCycleZeroing = false;
  bool HasZeroCycleZeroingGP = false;
  bool HasZeroCycleZeroingFPWorkaround = false;

  // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0",
  // as movi is more efficient across all cores. Newer cores can eliminate
  // fmovs early and there is no difference with movi, but this is not true
  // for all implementations.
  bool HasZeroCycleZeroingFP = true;

  // StrictAlign - Disallow unaligned memory accesses.
  bool StrictAlign = false;

  // NegativeImmediates - transform instructions with negative immediates.
  bool NegativeImmediates = true;

  // Enable 64-bit vectorization in SLP.
  unsigned MinVectorRegisterBitWidth = 64;

  // Do not place a BTI instruction after a call to a returns-twice function
  // like setjmp.
  bool NoBTIAtReturnTwice = false;

  bool OutlineAtomics = false;
  bool PredictableSelectIsExpensive = false;
  bool BalanceFPOps = false;
  bool CustomAsCheapAsMove = false;
  bool ExynosAsCheapAsMove = false;
  bool UsePostRAScheduler = false;
  bool Misaligned128StoreIsSlow = false;
  bool Paired128IsSlow = false;
  bool STRQroIsSlow = false;
  bool UseAlternateSExtLoadCVTF32Pattern = false;
  bool HasArithmeticBccFusion = false;
  bool HasArithmeticCbzFusion = false;
  bool HasCmpBccFusion = false;
  bool HasFuseAddress = false;
  bool HasFuseAES = false;
  bool HasFuseArithmeticLogic = false;
  bool HasFuseCCSelect = false;
  bool HasFuseCryptoEOR = false;
  bool HasFuseLiterals = false;
  bool DisableLatencySchedHeuristic = false;
  bool UseRSqrt = false;
  bool Force32BitJumpTables = false;
  bool UseEL1ForTP = false;
  bool UseEL2ForTP = false;
  bool UseEL3ForTP = false;
  bool AllowTaggedGlobals = false;
  bool HardenSlsRetBr = false;
  bool HardenSlsBlr = false;
  bool HardenSlsNoComdat = false;
  uint8_t MaxInterleaveFactor = 2;
  uint8_t VectorInsertExtractBaseCost = 3;
  uint16_t CacheLineSize = 0;
  uint16_t PrefetchDistance = 0;
  uint16_t MinPrefetchStride = 1;
  unsigned MaxPrefetchIterationsAhead = UINT_MAX;
  unsigned PrefFunctionLogAlignment = 0;
  unsigned PrefLoopLogAlignment = 0;
  unsigned MaxBytesForLoopAlignment = 0;
  unsigned MaxJumpTableSize = 0;
  unsigned WideningBaseCost = 0;

  // ReserveXRegister[i] - X#i is not available as a general purpose register.
  BitVector ReserveXRegister;

  // CustomCallSavedXRegs[i] - X#i is call saved.
  BitVector CustomCallSavedXRegs;

  bool IsLittle;

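  // Known bounds on the SVE register width, in bits. A value of 0 means no
  // bound is known beyond what the architecture implies. These are normally
  // seeded from the MinSVEVectorSizeInBitsOverride /
  // MaxSVEVectorSizeInBitsOverride constructor arguments. VScaleForTuning is
  // the value of vscale the cost model assumes when tuning for scalable
  // vectors; it may be adjusted per-CPU in initializeProperties().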
  unsigned MinSVEVectorSizeInBits;
  unsigned MaxSVEVectorSizeInBits;
  unsigned VScaleForTuning = 2;

  /// TargetTriple - What processor and OS we're targeting.
  Triple TargetTriple;

  AArch64FrameLowering FrameLowering;
  AArch64InstrInfo InstrInfo;
  AArch64SelectionDAGInfo TSInfo;
  AArch64TargetLowering TLInfo;

  /// GlobalISel related APIs.
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<InlineAsmLowering> InlineAsmLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;

private:
  /// initializeSubtargetDependencies - Initializes using CPUString and the
  /// passed in feature string so that we can use initializer lists for
  /// subtarget initialization.
  AArch64Subtarget &initializeSubtargetDependencies(StringRef FS,
                                                    StringRef CPUString,
                                                    StringRef TuneCPUString);

  /// Initialize properties based on the selected processor family.
  void initializeProperties();

public:
  /// This constructor initializes the data members to match that
  /// of the specified triple.
  AArch64Subtarget(const Triple &TT, const std::string &CPU,
                   const std::string &TuneCPU, const std::string &FS,
                   const TargetMachine &TM, bool LittleEndian,
                   unsigned MinSVEVectorSizeInBitsOverride = 0,
                   unsigned MaxSVEVectorSizeInBitsOverride = 0);

  const AArch64SelectionDAGInfo *getSelectionDAGInfo() const override {
    return &TSInfo;
  }
  const AArch64FrameLowering *getFrameLowering() const override {
    return &FrameLowering;
  }
  const AArch64TargetLowering *getTargetLowering() const override {
    return &TLInfo;
  }
  const AArch64InstrInfo *getInstrInfo() const override { return &InstrInfo; }
  const AArch64RegisterInfo *getRegisterInfo() const override {
    return &getInstrInfo()->getRegisterInfo();
  }
  const CallLowering *getCallLowering() const override;
  const InlineAsmLowering *getInlineAsmLowering() const override;
  InstructionSelector *getInstructionSelector() const override;
  const LegalizerInfo *getLegalizerInfo() const override;
  const RegisterBankInfo *getRegBankInfo() const override;
  const Triple &getTargetTriple() const { return TargetTriple; }
  bool enableMachineScheduler() const override { return true; }
  bool enablePostRAScheduler() const override {
    return UsePostRAScheduler;
  }

  /// Returns ARM processor family.
  /// Avoid this function! CPU specifics should be kept local to this class
  /// and preferably modeled with SubtargetFeatures or properties in
  /// initializeProperties().
  ARMProcFamilyEnum getProcFamily() const {
    return ARMProcFamily;
  }

  bool hasV8_0aOps() const { return HasV8_0aOps; }
  bool hasV8_1aOps() const { return HasV8_1aOps; }
  bool hasV8_2aOps() const { return HasV8_2aOps; }
  bool hasV8_3aOps() const { return HasV8_3aOps; }
  bool hasV8_4aOps() const { return HasV8_4aOps; }
  bool hasV8_5aOps() const { return HasV8_5aOps; }
  bool hasV9_0aOps() const { return HasV9_0aOps; }
  bool hasV9_1aOps() const { return HasV9_1aOps; }
  bool hasV9_2aOps() const { return HasV9_2aOps; }
  bool hasV9_3aOps() const { return HasV9_3aOps; }
  bool hasV8_0rOps() const { return HasV8_0rOps; }

  bool hasZeroCycleRegMove() const { return HasZeroCycleRegMove; }

  bool hasZeroCycleZeroingGP() const { return HasZeroCycleZeroingGP; }

  bool hasZeroCycleZeroingFP() const { return HasZeroCycleZeroingFP; }

  bool hasZeroCycleZeroingFPWorkaround() const {
    return HasZeroCycleZeroingFPWorkaround;
  }

  bool requiresStrictAlign() const { return StrictAlign; }

  bool isXRaySupported() const override { return true; }

  unsigned getMinVectorRegisterBitWidth() const {
    return MinVectorRegisterBitWidth;
  }

  bool isXRegisterReserved(size_t i) const { return ReserveXRegister[i]; }
  unsigned getNumXRegisterReserved() const { return ReserveXRegister.count(); }
  bool isXRegCustomCalleeSaved(size_t i) const {
    return CustomCallSavedXRegs[i];
  }
  bool hasCustomCallingConv() const { return CustomCallSavedXRegs.any(); }
  bool hasFPARMv8() const { return HasFPARMv8; }
  bool hasNEON() const { return HasNEON; }
  bool hasCrypto() const { return HasCrypto; }
  bool hasDotProd() const { return HasDotProd; }
  bool hasCRC() const { return HasCRC; }
  bool hasLSE() const { return HasLSE; }
  bool hasLSE2() const { return HasLSE2; }
  bool hasRAS() const { return HasRAS; }
  bool hasRDM() const { return HasRDM; }
  bool hasSM4() const { return HasSM4; }
  bool hasSHA3() const { return HasSHA3; }
  bool hasSHA2() const { return HasSHA2; }
  bool hasAES() const { return HasAES; }
  bool hasCONTEXTIDREL2() const { return HasCONTEXTIDREL2; }
  bool balanceFPOps() const { return BalanceFPOps; }
  bool predictableSelectIsExpensive() const {
    return PredictableSelectIsExpensive;
  }
  bool hasCustomCheapAsMoveHandling() const { return CustomAsCheapAsMove; }
  bool hasExynosCheapAsMoveHandling() const { return ExynosAsCheapAsMove; }
  bool isMisaligned128StoreSlow() const { return Misaligned128StoreIsSlow; }
  bool isPaired128Slow() const { return Paired128IsSlow; }
  bool isSTRQroSlow() const { return STRQroIsSlow; }
  bool useAlternateSExtLoadCVTF32Pattern() const {
    return UseAlternateSExtLoadCVTF32Pattern;
  }
  bool hasArithmeticBccFusion() const { return HasArithmeticBccFusion; }
  bool hasArithmeticCbzFusion() const { return HasArithmeticCbzFusion; }
  bool hasCmpBccFusion() const { return HasCmpBccFusion; }
  bool hasFuseAddress() const { return HasFuseAddress; }
  bool hasFuseAES() const { return HasFuseAES; }
  bool hasFuseArithmeticLogic() const { return HasFuseArithmeticLogic; }
  bool hasFuseCCSelect() const { return HasFuseCCSelect; }
  bool hasFuseCryptoEOR() const { return HasFuseCryptoEOR; }
  bool hasFuseLiterals() const { return HasFuseLiterals; }

  /// Return true if the CPU supports any kind of instruction fusion.
  bool hasFusion() const {
    return hasArithmeticBccFusion() || hasArithmeticCbzFusion() ||
           hasFuseAES() || hasFuseArithmeticLogic() ||
           hasFuseCCSelect() || hasFuseLiterals();
  }

  bool hardenSlsRetBr() const { return HardenSlsRetBr; }
  bool hardenSlsBlr() const { return HardenSlsBlr; }
  bool hardenSlsNoComdat() const { return HardenSlsNoComdat; }

  bool useEL1ForTP() const { return UseEL1ForTP; }
  bool useEL2ForTP() const { return UseEL2ForTP; }
  bool useEL3ForTP() const { return UseEL3ForTP; }

  bool useRSqrt() const { return UseRSqrt; }
  bool force32BitJumpTables() const { return Force32BitJumpTables; }
  unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
  unsigned getVectorInsertExtractBaseCost() const {
    return VectorInsertExtractBaseCost;
  }
  unsigned getCacheLineSize() const override { return CacheLineSize; }
  unsigned getPrefetchDistance() const override { return PrefetchDistance; }
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) const override {
    return MinPrefetchStride;
  }
  unsigned getMaxPrefetchIterationsAhead() const override {
    return MaxPrefetchIterationsAhead;
  }
  unsigned getPrefFunctionLogAlignment() const {
    return PrefFunctionLogAlignment;
  }
  unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }

  unsigned getMaxBytesForLoopAlignment() const {
    return MaxBytesForLoopAlignment;
  }

  unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; }

  unsigned getWideningBaseCost() const { return WideningBaseCost; }

  bool useExperimentalZeroingPseudos() const {
    return UseExperimentalZeroingPseudos;
  }

  bool useScalarIncVL() const { return UseScalarIncVL; }

  /// CPU has TBI (top byte of addresses is ignored during HW address
  /// translation) and OS enables it.
  bool supportsAddressTopByteIgnored() const;

  bool hasPerfMon() const { return HasPerfMon; }
  bool hasFullFP16() const { return HasFullFP16; }
  bool hasFP16FML() const { return HasFP16FML; }
  bool hasSPE() const { return HasSPE; }
  bool hasLSLFast() const { return HasLSLFast; }
  bool hasSVE() const { return HasSVE; }
  bool hasSVE2() const { return HasSVE2; }
  bool hasRCPC() const { return HasRCPC; }
  bool hasAggressiveFMA() const { return HasAggressiveFMA; }
  bool hasAlternativeNZCV() const { return HasAlternativeNZCV; }
  bool hasFRInt3264() const { return HasFRInt3264; }
  bool hasSpecRestrict() const { return HasSpecRestrict; }
  bool hasSSBS() const { return HasSSBS; }
  bool hasSB() const { return HasSB; }
  bool hasPredRes() const { return HasPredRes; }
  bool hasCCDP() const { return HasCCDP; }
  bool hasBTI() const { return HasBTI; }
  bool hasRandGen() const { return HasRandGen; }
  bool hasMTE() const { return HasMTE; }
  bool hasTME() const { return HasTME; }
  // Arm SVE2 extensions
  bool hasSVE2AES() const { return HasSVE2AES; }
  bool hasSVE2SM4() const { return HasSVE2SM4; }
  bool hasSVE2SHA3() const { return HasSVE2SHA3; }
  bool hasSVE2BitPerm() const { return HasSVE2BitPerm; }
  bool hasMatMulInt8() const { return HasMatMulInt8; }
  bool hasMatMulFP32() const { return HasMatMulFP32; }
  bool hasMatMulFP64() const { return HasMatMulFP64; }

  // Armv8.6-A Extensions
  bool hasBF16() const { return HasBF16; }
  bool hasFineGrainedTraps() const { return HasFineGrainedTraps; }
  bool hasEnhancedCounterVirtualization() const {
    return HasEnhancedCounterVirtualization;
  }

  // Arm Scalable Matrix Extension (SME)
  bool hasSME() const { return HasSME; }
  bool hasSMEF64() const { return HasSMEF64; }
  bool hasSMEI64() const { return HasSMEI64; }
  bool hasStreamingSVE() const { return HasStreamingSVE; }

  bool isLittleEndian() const { return IsLittle; }

  bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
  bool isTargetIOS() const { return TargetTriple.isiOS(); }
  bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
  bool isTargetWindows() const { return TargetTriple.isOSWindows(); }
  bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
  bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }

  bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
  bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }

  bool isTargetILP32() const {
    return TargetTriple.isArch32Bit() ||
           TargetTriple.getEnvironment() == Triple::GNUILP32;
  }

  bool useAA() const override;

  bool outlineAtomics() const { return OutlineAtomics; }

  bool hasVH() const { return HasVH; }
  bool hasPAN() const { return HasPAN; }
  bool hasLOR() const { return HasLOR; }

  bool hasPsUAO() const { return HasPsUAO; }
  bool hasPAN_RWV() const { return HasPAN_RWV; }
  bool hasCCPP() const { return HasCCPP; }

  bool hasPAuth() const { return HasPAuth; }
  bool hasJS() const { return HasJS; }
  bool hasCCIDX() const { return HasCCIDX; }
  bool hasComplxNum() const { return HasComplxNum; }

  bool hasNV() const { return HasNV; }
  bool hasMPAM() const { return HasMPAM; }
  bool hasDIT() const { return HasDIT; }
  bool hasTRACEV8_4() const { return HasTRACEV8_4; }
  bool hasAM() const { return HasAM; }
  bool hasAMVS() const { return HasAMVS; }
  bool hasXS() const { return HasXS; }
  bool hasWFxT() const { return HasWFxT; }
  bool hasHCX() const { return HasHCX; }
  bool hasLS64() const { return HasLS64; }
  bool hasSEL2() const { return HasSEL2; }
  bool hasTLB_RMI() const { return HasTLB_RMI; }
  bool hasFlagM() const { return HasFlagM; }
  bool hasRCPC_IMMO() const { return HasRCPC_IMMO; }
  bool hasEL2VMSA() const { return HasEL2VMSA; }
  bool hasEL3() const { return HasEL3; }
  bool hasHBC() const { return HasHBC; }
  bool hasMOPS() const { return HasMOPS; }

  bool fixCortexA53_835769() const { return FixCortexA53_835769; }

  bool noBTIAtReturnTwice() const { return NoBTIAtReturnTwice; }

  bool addrSinkUsingGEPs() const override {
    // Keeping GEPs inbounds is important for exploiting AArch64
    // addressing-modes in ILP32 mode.
    return useAA() || isTargetILP32();
  }

  bool useSmallAddressing() const {
    switch (TLInfo.getTargetMachine().getCodeModel()) {
    case CodeModel::Kernel:
      // Kernel is currently allowed only for Fuchsia targets,
      // where it is the same as Small for almost all purposes.
    case CodeModel::Small:
      return true;
    default:
      return false;
    }
  }

  /// ParseSubtargetFeatures - Parses features string setting specified
  /// subtarget options. Definition of function is auto generated by tblgen.
  void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);

  /// ClassifyGlobalReference - Find the target operand flags that describe
  /// how a global value should be referenced for the current subtarget.
  unsigned ClassifyGlobalReference(const GlobalValue *GV,
                                   const TargetMachine &TM) const;

  unsigned classifyGlobalFunctionReference(const GlobalValue *GV,
                                           const TargetMachine &TM) const;

  void overrideSchedPolicy(MachineSchedPolicy &Policy,
                           unsigned NumRegionInstrs) const override;

  bool enableEarlyIfConversion() const override;

  bool enableAdvancedRASplitCost() const override { return false; }

  std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const override;

  bool isCallingConvWin64(CallingConv::ID CC) const {
    switch (CC) {
    case CallingConv::C:
    case CallingConv::Fast:
    case CallingConv::Swift:
      return isTargetWindows();
    case CallingConv::Win64:
      return true;
    default:
      return false;
    }
  }

  /// Return whether FrameLowering should always set the "extended frame
  /// present" bit in FP, or set it based on a symbol in the runtime.
  bool swiftAsyncContextIsDynamicallySet() const {
    // Older OS versions (particularly system unwinders) are confused by the
    // Swift extended frame, so when building code that might be run on them we
    // must dynamically query the concurrency library to determine whether
    // extended frames should be flagged as present.
    const Triple &TT = getTargetTriple();

    unsigned Major = TT.getOSVersion().getMajor();
    switch (TT.getOS()) {
    default:
      return false;
    case Triple::IOS:
    case Triple::TvOS:
      return Major < 15;
    case Triple::WatchOS:
      return Major < 8;
    case Triple::MacOSX:
    case Triple::Darwin:
      return Major < 12;
    }
  }

  void mirFileLoaded(MachineFunction &MF) const override;

  // Return the known range for the bit length of SVE data registers. A value
  // of 0 means nothing is known about that particular limit beyond what's
  // implied by the architecture.
  unsigned getMaxSVEVectorSizeInBits() const {
    assert(HasSVE && "Tried to get SVE vector length without SVE support!");
    return MaxSVEVectorSizeInBits;
  }

  unsigned getMinSVEVectorSizeInBits() const {
    assert(HasSVE && "Tried to get SVE vector length without SVE support!");
    return MinSVEVectorSizeInBits;
  }

  bool useSVEForFixedLengthVectors() const {
    // Prefer NEON unless larger SVE registers are available.
    return hasSVE() && getMinSVEVectorSizeInBits() >= 256;
  }

  unsigned getVScaleForTuning() const { return VScaleForTuning; }
};
} // End llvm namespace

#endif