//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/CSEConfigBase.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/CFGuard.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores"
                                           " to them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));

static cl::opt<bool>
    EnableSVEIntrinsicOpts("aarch64-enable-sve-intrinsic-opts", cl::Hidden,
                           cl::desc("Enable SVE intrinsic opts"),
                           cl::init(true));

static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
                        cl::desc("Enable the AArch64 branch target pass"),
                        cl::init(true));

static cl::opt<unsigned> SVEVectorBitsMaxOpt(
    "aarch64-sve-vector-bits-max",
    cl::desc("Assume SVE vector registers are at most this big, "
             "with zero meaning no maximum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<unsigned> SVEVectorBitsMinOpt(
    "aarch64-sve-vector-bits-min",
    cl::desc("Assume SVE vector registers are at least this big, "
             "with zero meaning no minimum size is assumed."),
    cl::init(0), cl::Hidden);

extern cl::opt<bool> EnableHomogeneousPrologEpilog;
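
// Illustrative use of the options defined above (a sketch only; the triple,
// attributes and input file are placeholders). The flags are cl::Hidden, so
// they show up under -help-hidden rather than -help, but llc still accepts
// them, e.g.:
//
//   llc -mtriple=aarch64-linux-gnu -mattr=+sve \
//       -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=512 \
//       -aarch64-enable-ccmp=false input.ll -o input.s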

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  RegisterTargetMachine<AArch64leTargetMachine> W(getTheARM64_32Target());
  RegisterTargetMachine<AArch64leTargetMachine> V(getTheAArch64_32Target());
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64BranchTargetsPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64CompressJumpTablesPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64SIMDInstrOptPass(*PR);
  initializeAArch64O0PreLegalizerCombinerPass(*PR);
  initializeAArch64PreLegalizerCombinerPass(*PR);
  initializeAArch64PostLegalizerCombinerPass(*PR);
  initializeAArch64PostLegalizerLoweringPass(*PR);
  initializeAArch64PostSelectOptimizePass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeFalkorHWPFFixPass(*PR);
  initializeFalkorMarkStridedAccessesLegacyPass(*PR);
  initializeLDTLSCleanupPass(*PR);
  initializeSVEIntrinsicOptsPass(*PR);
  initializeAArch64SpeculationHardeningPass(*PR);
  initializeAArch64SLSHardeningPass(*PR);
  initializeAArch64StackTaggingPass(*PR);
  initializeAArch64StackTaggingPreRAPass(*PR);
  initializeAArch64LowerHomogeneousPrologEpilogPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return std::make_unique<AArch64_COFFTargetObjectFile>();

  return std::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::aarch64_32)
      return "e-m:o-p:32:32-i64:64-i128:128-n32:64-S128";
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  }
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128";
  std::string Endian = LittleEndian ? "e" : "E";
  std::string Ptr32 = TT.getEnvironment() == Triple::GNUILP32 ? "-p:32:32" : "";
  return Endian + "-m:e" + Ptr32 +
         "-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}
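
// For reference, a few of the strings the helper above produces (derived
// directly from the branches above; not an exhaustive list):
//   little-endian ELF (e.g. aarch64-linux-gnu):
//     "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
//   big-endian ELF (e.g. aarch64_be-linux-gnu):
//     "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
//   Mach-O (e.g. arm64-apple-darwin):
//     "e-m:o-i64:64-i128:128-n32:64-S128"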

static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU) {
  if (CPU.empty() && TT.isArm64e())
    return "apple-a12";
  return CPU;
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin and Windows are always PIC.
  if (TT.isOSDarwin() || TT.isOSWindows())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

static CodeModel::Model
getEffectiveAArch64CodeModel(const Triple &TT, Optional<CodeModel::Model> CM,
                             bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
        *CM != CodeModel::Large) {
      report_fatal_error(
          "Only small, tiny and large code models are allowed on AArch64");
    } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
      report_fatal_error("tiny code model is only supported on ELF");
    return *CM;
  }
  // The default MCJIT memory managers make no guarantees about where they can
  // find an executable page; JITed code needs to be able to refer to globals
  // no matter how far away they are.
  // We should set CodeModel::Small for Windows ARM64 in JIT mode: with the
  // large code model LLVM generates a sequence of 4 MOV instructions, and
  // Windows does not support relocating such long branches (4 MOVs).
  if (JIT && !TT.isOSWindows())
    return CodeModel::Large;
  return CodeModel::Small;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT,
                                           bool LittleEndian)
    : LLVMTargetMachine(T,
                        computeDataLayout(TT, Options.MCOptions, LittleEndian),
                        TT, computeDefaultCPU(TT, CPU), FS, Options,
                        getEffectiveRelocModel(TT, RM),
                        getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.
    //
    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }

  if (this->Options.TLSSize == 0) // default
    this->Options.TLSSize = 24;
  if ((getCodeModel() == CodeModel::Small ||
       getCodeModel() == CodeModel::Kernel) &&
      this->Options.TLSSize > 32)
    // for the small (and kernel) code model, the maximum TLS size is 4GiB
    this->Options.TLSSize = 32;
  else if (getCodeModel() == CodeModel::Tiny && this->Options.TLSSize > 24)
    // for the tiny code model, the maximum TLS size is 1MiB (< 16MiB)
    this->Options.TLSSize = 24;

  // Enable GlobalISel at or below EnableGlobalISelAtO, unless this is
  // MachO/CodeModel::Large, which GlobalISel does not support.
  if (getOptLevel() <= EnableGlobalISelAtO &&
      TT.getArch() != Triple::aarch64_32 &&
      TT.getEnvironment() != Triple::GNUILP32 &&
      !(getCodeModel() == CodeModel::Large && TT.isOSBinFormatMachO())) {
    setGlobalISel(true);
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);

  // AArch64 supports the debug entry values.
  setSupportsDebugEntryValues(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU =
      CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
  std::string FS =
      FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;

  SmallString<512> Key;

  unsigned MinSVEVectorSize = 0;
  unsigned MaxSVEVectorSize = 0;
  Attribute VScaleRangeAttr = F.getFnAttribute(Attribute::VScaleRange);
  if (VScaleRangeAttr.isValid()) {
    std::tie(MinSVEVectorSize, MaxSVEVectorSize) =
        VScaleRangeAttr.getVScaleRangeArgs();
    MinSVEVectorSize *= 128;
    MaxSVEVectorSize *= 128;
  } else {
    MinSVEVectorSize = SVEVectorBitsMinOpt;
    MaxSVEVectorSize = SVEVectorBitsMaxOpt;
  }

  assert(MinSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert(MaxSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert((MaxSVEVectorSize >= MinSVEVectorSize || MaxSVEVectorSize == 0) &&
         "Minimum SVE vector size should not be larger than its maximum!");

  // Sanitize user input in case asserts are disabled.
  if (MaxSVEVectorSize == 0)
    MinSVEVectorSize = (MinSVEVectorSize / 128) * 128;
  else {
    MinSVEVectorSize =
        (std::min(MinSVEVectorSize, MaxSVEVectorSize) / 128) * 128;
    MaxSVEVectorSize =
        (std::max(MinSVEVectorSize, MaxSVEVectorSize) / 128) * 128;
  }

  Key += "SVEMin";
  Key += std::to_string(MinSVEVectorSize);
  Key += "SVEMax";
  Key += std::to_string(MaxSVEVectorSize);
  Key += CPU;
  Key += FS;

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                           isLittle, MinSVEVectorSize,
                                           MaxSVEVectorSize);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    if (ST.hasFusion()) {
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (v. addPreSched2()).
      ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return nullptr;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPreEmitPass2() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass());

  // Expand any SVE vector library calls that we can't code generate directly.
  if (EnableSVEIntrinsicOpts && TM->getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createSVEIntrinsicOptsPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(SimplifyCFGOptions()
                                            .forwardSwitchCondToPhi(true)
                                            .convertSwitchToLookupTable(true)
                                            .needCanonicalLoops(false)
                                            .hoistCommonInsts(true)
                                            .sinkCommonInsts(true)));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  TargetPassConfig::addIRPasses();

  addPass(createAArch64StackTaggingPass(
      /*IsOptNone=*/TM->getOptLevel() == CodeGenOpt::None));

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    addPass(createInterleavedLoadCombinePass());
    addPass(createInterleavedAccessPass());
  }

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  // Add Control Flow Guard checks.
  if (TM->getTargetTriple().isOSWindows())
    addPass(createCFGuardCheckPass());
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes().
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for performance.
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;

    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  if (getOptLevel() == CodeGenOpt::None)
    addPass(createAArch64O0PreLegalizerCombiner());
  else
    addPass(createAArch64PreLegalizerCombiner());
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void AArch64PassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  if (!IsOptNone)
    addPass(createAArch64PostLegalizerCombiner(IsOptNone));
  addPass(createAArch64PostLegalizerLowering());
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

void AArch64PassConfig::addPreGlobalInstructionSelect() {
  addPass(new Localizer());
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64PostSelectOptimize());
  return false;
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64StackTaggingPreRAPass());
  return true;
}
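
// Most of the ILP passes added above are individually gated on the cl::opt
// flags defined at the top of this file (EnableCondOpt, EnableCCMP, EnableMCR,
// and so on), so each one can be toggled from the command line for
// experiments without rebuilding the compiler.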

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Lower homogeneous frame instructions
  if (EnableHomogeneousPrologEpilog)
    addPass(createAArch64LowerHomogeneousPrologEpilogPass());
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  addPass(createAArch64SpeculationHardeningPass());

  addPass(createAArch64IndirectThunks());
  addPass(createAArch64SLSHardeningPass());

  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOpt::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());

  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());

  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (TM->getTargetTriple().isOSWindows()) {
    // Identify valid longjmp targets for Windows Control Flow Guard.
    addPass(createCFGuardLongjmpPass());
    // Identify valid eh continuation targets for Windows EHCont Guard.
    addPass(createEHContGuardCatchretPass());
  }

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}

void AArch64PassConfig::addPreEmitPass2() {
  // SVE bundles move prefixes with destructive operations. BLR_RVMARKER pseudo
  // instructions are lowered to bundles as well.
  addPass(createUnpackMachineBundles(nullptr));
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::AArch64FunctionInfo();
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<AArch64FunctionInfo>();
  return new yaml::AArch64FunctionInfo(*MFI);
}

bool AArch64TargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI =
      reinterpret_cast<const yaml::AArch64FunctionInfo &>(MFI);
  MachineFunction &MF = PFS.MF;
  MF.getInfo<AArch64FunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}