//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64MachineScheduler.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CSEConfigBase.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LoadStoreOpt.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Pass.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/CFGuard.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/LowerIFunc.h"
#include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h"
#include <memory>
#include <optional>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAArch64CopyPropagation(
    "aarch64-enable-copy-propagation",
    cl::desc("Enable the copy propagation with AArch64 copy instr"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);
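
// Note: the cl::opt toggles in this file are hidden llc/opt command-line
// flags, handy for bisecting a miscompile or measuring a single pass; an
// illustrative invocation (foo.ll being any input module) would be:
//   llc -mtriple=aarch64 -aarch64-enable-ccmp=false foo.ll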

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    EnableSelectOpt("aarch64-select-opt", cl::Hidden,
                    cl::desc("Enable select to branch optimizations"),
                    cl::init(true));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));
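
// Note: unlike the plain cl::opt<bool> toggles above, the next flag is
// tri-state (cl::BOU_TRUE / cl::BOU_FALSE / cl::BOU_UNSET), letting
// addPreISel() below distinguish an explicit request from the unset default.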
// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));

static cl::opt<bool>
    EnableSVEIntrinsicOpts("aarch64-enable-sve-intrinsic-opts", cl::Hidden,
                           cl::desc("Enable SVE intrinsic opts"),
                           cl::init(true));

static cl::opt<bool>
    EnableSMEPeepholeOpt("enable-aarch64-sme-peephole-opt", cl::init(true),
                         cl::Hidden,
                         cl::desc("Perform SME peephole optimization"));

static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
                        cl::desc("Enable the AArch64 branch target pass"),
                        cl::init(true));

static cl::opt<unsigned> SVEVectorBitsMaxOpt(
    "aarch64-sve-vector-bits-max",
    cl::desc("Assume SVE vector registers are at most this big, "
             "with zero meaning no maximum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<unsigned> SVEVectorBitsMinOpt(
    "aarch64-sve-vector-bits-min",
    cl::desc("Assume SVE vector registers are at least this big, "
             "with zero meaning no minimum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<bool> ForceStreaming(
    "force-streaming",
    cl::desc("Force the use of streaming code for all functions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> ForceStreamingCompatible(
    "force-streaming-compatible",
    cl::desc("Force the use of streaming-compatible code for all functions"),
    cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableHomogeneousPrologEpilog;

static cl::opt<bool> EnableGISelLoadStoreOptPreLegal(
    "aarch64-enable-gisel-ldst-prelegal",
    cl::desc("Enable GlobalISel's pre-legalizer load/store optimization pass"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableGISelLoadStoreOptPostLegal(
    "aarch64-enable-gisel-ldst-postlegal",
    cl::desc("Enable GlobalISel's post-legalizer load/store optimization pass"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnableSinkFold("aarch64-enable-sink-fold",
                   cl::desc("Enable sinking and folding of instruction copies"),
                   cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableMachinePipeliner("aarch64-enable-pipeliner",
                           cl::desc("Enable Machine Pipeliner for AArch64"),
                           cl::init(false), cl::Hidden);
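
// Target registration entry point, invoked when the AArch64 backend is
// linked in (e.g. via InitializeAllTargets()): registers the TargetMachine
// variants and the backend's passes with the PassRegistry.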
extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  RegisterTargetMachine<AArch64leTargetMachine> W(getTheARM64_32Target());
  RegisterTargetMachine<AArch64leTargetMachine> V(getTheAArch64_32Target());
  auto &PR = *PassRegistry::getPassRegistry();
  initializeGlobalISel(PR);
  initializeAArch64A53Fix835769Pass(PR);
  initializeAArch64A57FPLoadBalancingPass(PR);
  initializeAArch64AdvSIMDScalarPass(PR);
  initializeAArch64AsmPrinterPass(PR);
  initializeAArch64BranchTargetsPass(PR);
  initializeAArch64CollectLOHPass(PR);
  initializeAArch64CompressJumpTablesPass(PR);
  initializeAArch64ConditionalComparesPass(PR);
  initializeAArch64ConditionOptimizerPass(PR);
  initializeAArch64DeadRegisterDefinitionsPass(PR);
  initializeAArch64ExpandPseudoPass(PR);
  initializeAArch64LoadStoreOptPass(PR);
  initializeAArch64MIPeepholeOptPass(PR);
  initializeAArch64SIMDInstrOptPass(PR);
  initializeAArch64O0PreLegalizerCombinerPass(PR);
  initializeAArch64PreLegalizerCombinerPass(PR);
  initializeAArch64PointerAuthPass(PR);
  initializeAArch64PostCoalescerPass(PR);
  initializeAArch64PostLegalizerCombinerPass(PR);
  initializeAArch64PostLegalizerLoweringPass(PR);
  initializeAArch64PostSelectOptimizePass(PR);
  initializeAArch64PromoteConstantPass(PR);
  initializeAArch64RedundantCopyEliminationPass(PR);
  initializeAArch64StorePairSuppressPass(PR);
  initializeFalkorHWPFFixPass(PR);
  initializeFalkorMarkStridedAccessesLegacyPass(PR);
  initializeLDTLSCleanupPass(PR);
  initializeKCFIPass(PR);
  initializeSMEABIPass(PR);
  initializeSMEPeepholeOptPass(PR);
  initializeSVEIntrinsicOptsPass(PR);
  initializeAArch64SpeculationHardeningPass(PR);
  initializeAArch64SLSHardeningPass(PR);
  initializeAArch64StackTaggingPass(PR);
  initializeAArch64StackTaggingPreRAPass(PR);
  initializeAArch64LowerHomogeneousPrologEpilogPass(PR);
  initializeAArch64DAGToDAGISelLegacyPass(PR);
  initializeAArch64CondBrTuningPass(PR);
  initializeAArch64Arm64ECCallLoweringPass(PR);
}

void AArch64TargetMachine::reset() { SubtargetMap.clear(); }

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return std::make_unique<AArch64_COFFTargetObjectFile>();

  return std::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::aarch64_32)
      return "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-"
             "n32:64-S128-Fn32";
    return "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-"
           "Fn32";
  }
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p270:32:32-p271:32:32-p272:64:64-p:64:64-i32:32-i64:64-i128:"
           "128-n32:64-S128-Fn32";
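
  // The remaining triples are ELF-like. As an example, little-endian Linux
  // (aarch64-unknown-linux-gnu) produces
  // "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-
  // i128:128-n32:64-S128-Fn32".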
"e" : "E"; 307 std::string Ptr32 = TT.getEnvironment() == Triple::GNUILP32 ? "-p:32:32" : ""; 308 return Endian + "-m:e" + Ptr32 + 309 "-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-" 310 "n32:64-S128-Fn32"; 311 } 312 313 static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU) { 314 if (CPU.empty() && TT.isArm64e()) 315 return "apple-a12"; 316 return CPU; 317 } 318 319 static Reloc::Model getEffectiveRelocModel(const Triple &TT, 320 std::optional<Reloc::Model> RM) { 321 // AArch64 Darwin and Windows are always PIC. 322 if (TT.isOSDarwin() || TT.isOSWindows()) 323 return Reloc::PIC_; 324 // On ELF platforms the default static relocation model has a smart enough 325 // linker to cope with referencing external symbols defined in a shared 326 // library. Hence DynamicNoPIC doesn't need to be promoted to PIC. 327 if (!RM || *RM == Reloc::DynamicNoPIC) 328 return Reloc::Static; 329 return *RM; 330 } 331 332 static CodeModel::Model 333 getEffectiveAArch64CodeModel(const Triple &TT, 334 std::optional<CodeModel::Model> CM, bool JIT) { 335 if (CM) { 336 if (*CM != CodeModel::Small && *CM != CodeModel::Tiny && 337 *CM != CodeModel::Large) { 338 report_fatal_error( 339 "Only small, tiny and large code models are allowed on AArch64"); 340 } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF()) { 341 report_fatal_error("tiny code model is only supported on ELF"); 342 } 343 return *CM; 344 } 345 // The default MCJIT memory managers make no guarantees about where they can 346 // find an executable page; JITed code needs to be able to refer to globals 347 // no matter how far away they are. 348 // We should set the CodeModel::Small for Windows ARM64 in JIT mode, 349 // since with large code model LLVM generating 4 MOV instructions, and 350 // Windows doesn't support relocating these long branch (4 MOVs). 351 if (JIT && !TT.isOSWindows()) 352 return CodeModel::Large; 353 return CodeModel::Small; 354 } 355 356 /// Create an AArch64 architecture model. 357 /// 358 AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT, 359 StringRef CPU, StringRef FS, 360 const TargetOptions &Options, 361 std::optional<Reloc::Model> RM, 362 std::optional<CodeModel::Model> CM, 363 CodeGenOptLevel OL, bool JIT, 364 bool LittleEndian) 365 : CodeGenTargetMachineImpl( 366 T, computeDataLayout(TT, Options.MCOptions, LittleEndian), TT, 367 computeDefaultCPU(TT, CPU), FS, Options, 368 getEffectiveRelocModel(TT, RM), 369 getEffectiveAArch64CodeModel(TT, CM, JIT), OL), 370 TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) { 371 initAsmInfo(); 372 373 if (TT.isOSBinFormatMachO()) { 374 this->Options.TrapUnreachable = true; 375 this->Options.NoTrapAfterNoreturn = true; 376 } 377 378 if (getMCAsmInfo()->usesWindowsCFI()) { 379 // Unwinding can get confused if the last instruction in an 380 // exception-handling region (function, funclet, try block, etc.) 381 // is a call. 382 // 383 // FIXME: We could elide the trap if the next instruction would be in 384 // the same region anyway. 

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.
    //
    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }

  if (this->Options.TLSSize == 0) // default
    this->Options.TLSSize = 24;
  if ((getCodeModel() == CodeModel::Small ||
       getCodeModel() == CodeModel::Kernel) &&
      this->Options.TLSSize > 32)
    // for the small (and kernel) code model, the maximum TLS size is 4GiB
    this->Options.TLSSize = 32;
  else if (getCodeModel() == CodeModel::Tiny && this->Options.TLSSize > 24)
    // for the tiny code model, the maximum TLS size is 1MiB (< 16MiB)
    this->Options.TLSSize = 24;

  // Enable GlobalISel at or below EnableGlobalISelAtO, unless this is
  // MachO/CodeModel::Large, which GlobalISel does not support.
  if (static_cast<int>(getOptLevel()) <= EnableGlobalISelAtO &&
      TT.getArch() != Triple::aarch64_32 &&
      TT.getEnvironment() != Triple::GNUILP32 &&
      !(getCodeModel() == CodeModel::Large && TT.isOSBinFormatMachO())) {
    setGlobalISel(true);
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);

  // AArch64 supports the debug entry values.
  setSupportsDebugEntryValues(true);

  // AArch64 supports fixing up the DWARF unwind information.
  if (!getMCAsmInfo()->usesWindowsCFI())
    setCFIFixup(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute TuneAttr = F.getFnAttribute("tune-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");
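
  // Per-function "target-cpu", "tune-cpu" and "target-features" attributes
  // take precedence over the defaults this TargetMachine was created with.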
  StringRef CPU = CPUAttr.isValid() ? CPUAttr.getValueAsString() : TargetCPU;
  StringRef TuneCPU = TuneAttr.isValid() ? TuneAttr.getValueAsString() : CPU;
  StringRef FS = FSAttr.isValid() ? FSAttr.getValueAsString() : TargetFS;
  bool HasMinSize = F.hasMinSize();

  bool IsStreaming = ForceStreaming ||
                     F.hasFnAttribute("aarch64_pstate_sm_enabled") ||
                     F.hasFnAttribute("aarch64_pstate_sm_body");
  bool IsStreamingCompatible = ForceStreamingCompatible ||
                               F.hasFnAttribute("aarch64_pstate_sm_compatible");

  unsigned MinSVEVectorSize = 0;
  unsigned MaxSVEVectorSize = 0;
  if (F.hasFnAttribute(Attribute::VScaleRange)) {
    ConstantRange CR = getVScaleRange(&F, 64);
    MinSVEVectorSize = CR.getUnsignedMin().getZExtValue() * 128;
    MaxSVEVectorSize = CR.getUnsignedMax().getZExtValue() * 128;
  } else {
    MinSVEVectorSize = SVEVectorBitsMinOpt;
    MaxSVEVectorSize = SVEVectorBitsMaxOpt;
  }

  assert(MinSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert(MaxSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert((MaxSVEVectorSize >= MinSVEVectorSize || MaxSVEVectorSize == 0) &&
         "Minimum SVE vector size should not be larger than its maximum!");

  // Sanitize user input in case of no asserts
  if (MaxSVEVectorSize != 0) {
    MinSVEVectorSize = std::min(MinSVEVectorSize, MaxSVEVectorSize);
    MaxSVEVectorSize = std::max(MinSVEVectorSize, MaxSVEVectorSize);
  }

  SmallString<512> Key;
  raw_svector_ostream(Key) << "SVEMin" << MinSVEVectorSize << "SVEMax"
                           << MaxSVEVectorSize << "IsStreaming=" << IsStreaming
                           << "IsStreamingCompatible=" << IsStreamingCompatible
                           << CPU << TuneCPU << FS
                           << "HasMinSize=" << HasMinSize;

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<AArch64Subtarget>(
        TargetTriple, CPU, TuneCPU, FS, *this, isLittle, MinSVEVectorSize,
        MaxSVEVectorSize, IsStreaming, IsStreamingCompatible, HasMinSize);
  }

  if (IsStreaming && !I->hasSME())
    reportFatalUsageError("streaming SVE functions require SME");

  return I.get();
}
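
// Attach AArch64-specific DAG mutations to the pre-RA machine scheduler:
// cluster adjacent loads and stores, and keep fusible instruction pairs
// together on subtargets that implement macro fusion.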
ScheduleDAGInstrs *
AArch64TargetMachine::createMachineScheduler(MachineSchedContext *C) const {
  const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
  ScheduleDAGMILive *DAG = createSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.hasFusion())
    DAG->addMutation(createAArch64MacroFusionDAGMutation());
  return DAG;
}

ScheduleDAGInstrs *
AArch64TargetMachine::createPostMachineScheduler(MachineSchedContext *C) const {
  const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
  ScheduleDAGMI *DAG = createSchedPostRA<AArch64PostRASchedStrategy>(C);
  if (ST.hasFusion()) {
    // Run the Macro Fusion after RA again since literals are expanded from
    // pseudos then (see addPreSched2()).
    DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  return DAG;
}

size_t AArch64TargetMachine::clearLinkerOptimizationHints(
    const SmallPtrSetImpl<MachineInstr *> &MIs) const {
  if (MIs.empty())
    return 0;
  auto *MI = *MIs.begin();
  auto *FuncInfo = MI->getMF()->getInfo<AArch64FunctionInfo>();
  return FuncInfo->clearLinkerOptimizationHints(MIs);
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, std::optional<Reloc::Model> RM,
    std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, std::optional<Reloc::Model> RM,
    std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOptLevel::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
    setEnableSinkAndFold(EnableSinkFold);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  void addCodeGenPrepare() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPostBBSections() override;
  void addPreEmitPass2() override;
  bool addRegAssignAndRewriteOptimized() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

void AArch64TargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
  PB.registerLateLoopOptimizationsEPCallback(
      [=](LoopPassManager &LPM, OptimizationLevel Level) {
        LPM.addPass(LoopIdiomVectorizePass());
      });
  if (getTargetTriple().isOSWindows())
    PB.registerPipelineEarlySimplificationEPCallback(
        [](ModulePassManager &PM, OptimizationLevel, ThinOrFullLTOPhase) {
          PM.addPass(LowerIFuncPass());
        });
}

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(std::make_unique<AArch64TTIImpl>(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}
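
// The legacy codegen pipeline assembled by the hooks below can be inspected
// with, e.g.:
//   llc -mtriple=aarch64 -debug-pass=Structure foo.ll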
void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandLegacyPass());

  // Expand any SVE vector library calls that we can't code generate directly.
  if (EnableSVEIntrinsicOpts && TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createSVEIntrinsicOptsPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(SimplifyCFGOptions()
                                            .forwardSwitchCondToPhi(true)
                                            .convertSwitchRangeToICmp(true)
                                            .convertSwitchToLookupTable(true)
                                            .needCanonicalLoops(false)
                                            .hoistCommonInsts(true)
                                            .sinkCommonInsts(true)));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  if (EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  if (getOptLevel() == CodeGenOptLevel::Aggressive && EnableSelectOpt)
    addPass(createSelectOptimizePass());

  addPass(createAArch64StackTaggingPass(
      /*IsOptNone=*/TM->getOptLevel() == CodeGenOptLevel::None));

  // Match complex arithmetic patterns
  if (TM->getOptLevel() >= CodeGenOptLevel::Default)
    addPass(createComplexDeinterleavingPass(TM));

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    addPass(createInterleavedLoadCombinePass());
    addPass(createInterleavedAccessPass());
  }

  // Expand any functions marked with SME attributes which require special
  // changes for the calling convention or that require the lazy-saving
  // mechanism specified in the SME ABI.
  addPass(createSMEABIPass());

  // Add Control Flow Guard checks.
  if (TM->getTargetTriple().isOSWindows()) {
    if (TM->getTargetTriple().isWindowsArm64EC())
      addPass(createAArch64Arm64ECCallLoweringPass());
    else
      addPass(createCFGuardCheckPass());
  }

  if (TM->Options.JMCInstrument)
    addPass(createJMCInstrumenterPass());
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOptLevel::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize =
        (TM->getOptLevel() < CodeGenOptLevel::Aggressive) &&
        (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for performance.
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;

    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

void AArch64PassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createTypePromotionLegacyPass());
  TargetPassConfig::addCodeGenPrepare();
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  if (getOptLevel() == CodeGenOptLevel::None) {
    addPass(createAArch64O0PreLegalizerCombiner());
    addPass(new Localizer());
  } else {
    addPass(createAArch64PreLegalizerCombiner());
    addPass(new Localizer());
    if (EnableGISelLoadStoreOptPreLegal)
      addPass(new LoadStoreOpt());
  }
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void AArch64PassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  if (!IsOptNone) {
    addPass(createAArch64PostLegalizerCombiner(IsOptNone));
    if (EnableGISelLoadStoreOptPostLegal)
      addPass(new LoadStoreOpt());
  }
  addPass(createAArch64PostLegalizerLowering());
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64PostSelectOptimize());
  return false;
}

void AArch64PassConfig::addMachineSSAOptimization() {
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableSMEPeepholeOpt)
    addPass(createSMEPeepholeOptPass());

  // Run default MachineSSAOptimization first.
  TargetPassConfig::addMachineSSAOptimization();

  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64MIPeepholeOptPass());
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterLegacyID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64StackTaggingPreRAPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerLegacyID);
  }
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableMachinePipeliner)
    addPass(&MachinePipelinerID);
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOptLevel::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Lower homogeneous frame instructions
  if (EnableHomogeneousPrologEpilog)
    addPass(createAArch64LowerHomogeneousPrologEpilogPass());
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }
  // Emit KCFI checks for indirect calls.
  addPass(createKCFIPass());

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  addPass(createAArch64SpeculationHardeningPass());

  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive &&
      EnableAArch64CopyPropagation)
    addPass(createMachineCopyPropagationPass(true));

  addPass(createAArch64A53Fix835769());

  if (TM->getTargetTriple().isOSWindows()) {
    // Identify valid longjmp targets for Windows Control Flow Guard.
    addPass(createCFGuardLongjmpPass());
    // Identify valid eh continuation targets for Windows EHCont Guard.
    addPass(createEHContGuardTargetsPass());
  }

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}

void AArch64PassConfig::addPostBBSections() {
  addPass(createAArch64SLSHardeningPass());
  addPass(createAArch64PointerAuthPass());
  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());
}

void AArch64PassConfig::addPreEmitPass2() {
  // SVE bundles move prefixes with destructive operations. BLR_RVMARKER pseudo
  // instructions are lowered to bundles as well.
  addPass(createUnpackMachineBundles(nullptr));
}

bool AArch64PassConfig::addRegAssignAndRewriteOptimized() {
  addPass(createAArch64PostCoalescerPass());
  return TargetPassConfig::addRegAssignAndRewriteOptimized();
}

MachineFunctionInfo *AArch64TargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return AArch64FunctionInfo::create<AArch64FunctionInfo>(
      Allocator, F, static_cast<const AArch64Subtarget *>(STI));
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::AArch64FunctionInfo();
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<AArch64FunctionInfo>();
  return new yaml::AArch64FunctionInfo(*MFI);
}

bool AArch64TargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI = static_cast<const yaml::AArch64FunctionInfo &>(MFI);
  MachineFunction &MF = PFS.MF;
  MF.getInfo<AArch64FunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}