//===-- RISCVTargetMachine.cpp - Define TargetMachine for RISC-V ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements the info about the RISC-V target spec.
//
//===----------------------------------------------------------------------===//

#include "RISCVTargetMachine.h"
#include "MCTargetDesc/RISCVBaseInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVTargetObjectFile.h"
#include "RISCVTargetTransformInfo.h"
#include "TargetInfo/RISCVTargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MacroFusion.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h"
#include <optional>
using namespace llvm;

static cl::opt<bool> EnableRedundantCopyElimination(
    "riscv-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("riscv-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableMachineCombiner("riscv-enable-machine-combiner",
                          cl::desc("Enable the machine combiner pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<unsigned> RVVVectorBitsMaxOpt(
    "riscv-v-vector-bits-max",
    cl::desc("Assume V extension vector registers are at most this big, "
             "with zero meaning no maximum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<int> RVVVectorBitsMinOpt(
    "riscv-v-vector-bits-min",
    cl::desc("Assume V extension vector registers are at least this big, "
             "with zero meaning no minimum size is assumed. A value of -1 "
             "means use the Zvl*b extension. This is primarily used to enable "
             "autovectorization with fixed-width vectors."),
    cl::init(-1), cl::Hidden);

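// Illustrative usage only (the flag names are the cl::opts defined above; the
// input file is a placeholder): an exact VLEN for fixed-length RVV experiments
// is typically pinned by setting both bounds to the same value, e.g.
//   llc -mtriple=riscv64 -mattr=+v \
//       -riscv-v-vector-bits-min=128 -riscv-v-vector-bits-max=128 input.ll
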
static cl::opt<bool> EnableRISCVCopyPropagation(
    "riscv-enable-copy-propagation",
    cl::desc("Enable the copy propagation with RISC-V copy instr"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableRISCVDeadRegisterElimination(
    "riscv-enable-dead-defs", cl::Hidden,
    cl::desc("Enable the pass that removes dead"
             " definitions and replaces stores to"
             " them with stores to x0"),
    cl::init(true));

static cl::opt<bool>
    EnableSinkFold("riscv-enable-sink-fold",
                   cl::desc("Enable sinking and folding of instruction copies"),
                   cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableLoopDataPrefetch("riscv-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<bool> EnableMISchedLoadClustering(
    "riscv-misched-load-clustering", cl::Hidden,
    cl::desc("Enable load clustering in the machine scheduler"),
    cl::init(false));

static cl::opt<bool> EnableVSETVLIAfterRVVRegAlloc(
    "riscv-vsetvl-after-rvv-regalloc", cl::Hidden,
    cl::desc("Insert vsetvls after vector register allocation"),
    cl::init(true));

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
  RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
  RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
  auto *PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeRISCVO0PreLegalizerCombinerPass(*PR);
  initializeRISCVPreLegalizerCombinerPass(*PR);
  initializeRISCVPostLegalizerCombinerPass(*PR);
  initializeKCFIPass(*PR);
  initializeRISCVDeadRegisterDefinitionsPass(*PR);
  initializeRISCVMakeCompressibleOptPass(*PR);
  initializeRISCVGatherScatterLoweringPass(*PR);
  initializeRISCVCodeGenPreparePass(*PR);
  initializeRISCVPostRAExpandPseudoPass(*PR);
  initializeRISCVMergeBaseOffsetOptPass(*PR);
  initializeRISCVOptWInstrsPass(*PR);
  initializeRISCVPreRAExpandPseudoPass(*PR);
  initializeRISCVExpandPseudoPass(*PR);
  initializeRISCVVectorPeepholePass(*PR);
  initializeRISCVInsertVSETVLIPass(*PR);
  initializeRISCVInsertReadWriteCSRPass(*PR);
  initializeRISCVInsertWriteVXRMPass(*PR);
  initializeRISCVDAGToDAGISelLegacyPass(*PR);
  initializeRISCVMoveMergePass(*PR);
  initializeRISCVPushPopOptPass(*PR);
}

static StringRef computeDataLayout(const Triple &TT,
                                   const TargetOptions &Options) {
  StringRef ABIName = Options.MCOptions.getABIName();
  if (TT.isArch64Bit()) {
    if (ABIName == "lp64e")
      return "e-m:e-p:64:64-i64:64-i128:128-n32:64-S64";

    return "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128";
  }
  assert(TT.isArch32Bit() && "only RV32 and RV64 are currently supported");

  if (ABIName == "ilp32e")
    return "e-m:e-p:32:32-i64:64-n32-S32";

  return "e-m:e-p:32:32-i64:64-n32-S128";
}

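// For reference, the pieces of the RV64 string above decode as: little-endian
// ("e"), ELF-style name mangling ("m:e"), 64-bit pointers ("p:64:64"), 64-bit
// aligned i64, 128-bit aligned i128, native integer widths of 32 and 64 bits
// ("n32:64"), and a 128-bit aligned stack ("S128", relaxed to "S64" for the
// lp64e ABI).
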
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           std::optional<Reloc::Model> RM) {
  return RM.value_or(Reloc::Static);
}

RISCVTargetMachine::RISCVTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       std::optional<Reloc::Model> RM,
                                       std::optional<CodeModel::Model> CM,
                                       CodeGenOptLevel OL, bool JIT)
    : LLVMTargetMachine(T, computeDataLayout(TT, Options), TT, CPU, FS, Options,
                        getEffectiveRelocModel(TT, RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OL),
      TLOF(std::make_unique<RISCVELFTargetObjectFile>()) {
  initAsmInfo();

  // RISC-V supports the MachineOutliner.
  setMachineOutliner(true);
  setSupportsDefaultOutlining(true);

  if (TT.isOSFuchsia() && !TT.isArch64Bit())
    report_fatal_error("Fuchsia is only supported for 64-bit");
}

const RISCVSubtarget *
RISCVTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute TuneAttr = F.getFnAttribute("tune-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU =
      CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
  std::string TuneCPU =
      TuneAttr.isValid() ? TuneAttr.getValueAsString().str() : CPU;
  std::string FS =
      FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;

  unsigned RVVBitsMin = RVVVectorBitsMinOpt;
  unsigned RVVBitsMax = RVVVectorBitsMaxOpt;

  Attribute VScaleRangeAttr = F.getFnAttribute(Attribute::VScaleRange);
  if (VScaleRangeAttr.isValid()) {
    if (!RVVVectorBitsMinOpt.getNumOccurrences())
      RVVBitsMin = VScaleRangeAttr.getVScaleRangeMin() * RISCV::RVVBitsPerBlock;
    std::optional<unsigned> VScaleMax = VScaleRangeAttr.getVScaleRangeMax();
    if (VScaleMax.has_value() && !RVVVectorBitsMaxOpt.getNumOccurrences())
      RVVBitsMax = *VScaleMax * RISCV::RVVBitsPerBlock;
  }

  if (RVVBitsMin != -1U) {
    // FIXME: Change to >= 32 when VLEN = 32 is supported.
    assert((RVVBitsMin == 0 || (RVVBitsMin >= 64 && RVVBitsMin <= 65536 &&
                                isPowerOf2_32(RVVBitsMin))) &&
           "V or Zve* extension requires vector length to be in the range of "
           "64 to 65536 and a power of 2!");
    assert((RVVBitsMax >= RVVBitsMin || RVVBitsMax == 0) &&
           "Minimum V extension vector length should not be larger than its "
           "maximum!");
  }
  assert((RVVBitsMax == 0 || (RVVBitsMax >= 64 && RVVBitsMax <= 65536 &&
                              isPowerOf2_32(RVVBitsMax))) &&
         "V or Zve* extension requires vector length to be in the range of "
         "64 to 65536 and a power of 2!");

  if (RVVBitsMin != -1U) {
    if (RVVBitsMax != 0) {
      RVVBitsMin = std::min(RVVBitsMin, RVVBitsMax);
      RVVBitsMax = std::max(RVVBitsMin, RVVBitsMax);
    }

    RVVBitsMin = llvm::bit_floor(
        (RVVBitsMin < 64 || RVVBitsMin > 65536) ? 0 : RVVBitsMin);
  }
  RVVBitsMax =
      llvm::bit_floor((RVVBitsMax < 64 || RVVBitsMax > 65536) ? 0 : RVVBitsMax);

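  // A worked example of the derivation above (assuming no overriding command
  // line flags): RISCV::RVVBitsPerBlock is 64, so a function carrying
  // vscale_range(2,4) yields RVVBitsMin = 128 and RVVBitsMax = 256.
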
  SmallString<512> Key;
  raw_svector_ostream(Key) << "RVVMin" << RVVBitsMin << "RVVMax" << RVVBitsMax
                           << CPU << TuneCPU << FS;
  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    auto ABIName = Options.MCOptions.getABIName();
    if (const MDString *ModuleTargetABI = dyn_cast_or_null<MDString>(
            F.getParent()->getModuleFlag("target-abi"))) {
      auto TargetABI = RISCVABI::getTargetABI(ABIName);
      if (TargetABI != RISCVABI::ABI_Unknown &&
          ModuleTargetABI->getString() != ABIName) {
        report_fatal_error("-target-abi option != target-abi module flag");
      }
      ABIName = ModuleTargetABI->getString();
    }
    I = std::make_unique<RISCVSubtarget>(
        TargetTriple, CPU, TuneCPU, FS, ABIName, RVVBitsMin, RVVBitsMax, *this);
  }
  return I.get();
}

MachineFunctionInfo *RISCVTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return RISCVMachineFunctionInfo::create<RISCVMachineFunctionInfo>(Allocator,
                                                                    F, STI);
}

TargetTransformInfo
RISCVTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(RISCVTTIImpl(this, F));
}

// A RISC-V hart has a single byte-addressable address space of 2^XLEN bytes
// for all memory accesses, so it is reasonable to assume that an
// implementation has no-op address space casts. If an implementation makes a
// change to this, they can override it here.
bool RISCVTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                             unsigned DstAS) const {
  return true;
}

namespace {

class RVVRegisterRegAlloc : public RegisterRegAllocBase<RVVRegisterRegAlloc> {
public:
  RVVRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

static bool onlyAllocateRVVReg(const TargetRegisterInfo &TRI,
                               const MachineRegisterInfo &MRI,
                               const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return RISCVRegisterInfo::isRVVRegClass(RC);
}

static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

static llvm::once_flag InitializeDefaultRVVRegisterAllocatorFlag;

/// -riscv-rvv-regalloc=<fast|basic|greedy> command line option.
/// This option selects the register allocator used only for RVV registers.
/// For example: -riscv-rvv-regalloc=basic
static cl::opt<RVVRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<RVVRegisterRegAlloc>>
    RVVRegAlloc("riscv-rvv-regalloc", cl::Hidden,
                cl::init(&useDefaultRegisterAllocator),
                cl::desc("Register allocator to use for RVV registers."));

static void initializeDefaultRVVRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = RVVRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = RVVRegAlloc;
    RVVRegisterRegAlloc::setDefault(RVVRegAlloc);
  }
}

static FunctionPass *createBasicRVVRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateRVVReg);
}

static FunctionPass *createGreedyRVVRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateRVVReg);
}

static FunctionPass *createFastRVVRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateRVVReg, false);
}

static RVVRegisterRegAlloc basicRegAllocRVVReg("basic",
                                               "basic register allocator",
                                               createBasicRVVRegisterAllocator);
static RVVRegisterRegAlloc
    greedyRegAllocRVVReg("greedy", "greedy register allocator",
                         createGreedyRVVRegisterAllocator);

static RVVRegisterRegAlloc fastRegAllocRVVReg("fast", "fast register allocator",
                                              createFastRVVRegisterAllocator);

class RISCVPassConfig : public TargetPassConfig {
public:
  RISCVPassConfig(RISCVTargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOptLevel::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
    setEnableSinkAndFold(EnableSinkFold);
  }

  RISCVTargetMachine &getRISCVTargetMachine() const {
    return getTM<RISCVTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = nullptr;
    if (EnableMISchedLoadClustering) {
      DAG = createGenericSchedLive(C);
      DAG->addMutation(createLoadClusterDAGMutation(
          DAG->TII, DAG->TRI, /*ReorderWhileClustering=*/true));
    }
    return DAG;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  void addCodeGenPrepare() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addPreEmitPass() override;
  void addPreEmitPass2() override;
  void addPreSched2() override;
  void addMachineSSAOptimization() override;
  FunctionPass *createRVVRegAllocPass(bool Optimized);
  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addFastRegAlloc() override;
};
} // namespace

TargetPassConfig *RISCVTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new RISCVPassConfig(*this, PM);
}

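// Note on the register allocation hooks below: RVV virtual registers are
// allocated first by the pass returned from createRVVRegAllocPass(); the
// remaining (scalar) virtual registers are then handled by the generic
// allocator via the TargetPassConfig base implementations, with vsetvli
// insertion optionally scheduled in between (see EnableVSETVLIAfterRVVRegAlloc).
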
FunctionPass *RISCVPassConfig::createRVVRegAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultRVVRegisterAllocatorFlag,
                  initializeDefaultRVVRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = RVVRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRVVRegisterAllocator();

  return createFastRVVRegisterAllocator();
}

bool RISCVPassConfig::addRegAssignAndRewriteFast() {
  addPass(createRVVRegAllocPass(false));
  if (EnableVSETVLIAfterRVVRegAlloc)
    addPass(createRISCVInsertVSETVLIPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRISCVDeadRegisterElimination)
    addPass(createRISCVDeadRegisterDefinitionsPass());
  return TargetPassConfig::addRegAssignAndRewriteFast();
}

bool RISCVPassConfig::addRegAssignAndRewriteOptimized() {
  addPass(createRVVRegAllocPass(true));
  addPass(createVirtRegRewriter(false));
  if (EnableVSETVLIAfterRVVRegAlloc)
    addPass(createRISCVInsertVSETVLIPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRISCVDeadRegisterElimination)
    addPass(createRISCVDeadRegisterDefinitionsPass());
  return TargetPassConfig::addRegAssignAndRewriteOptimized();
}

void RISCVPassConfig::addIRPasses() {
  addPass(createAtomicExpandLegacyPass());

  if (getOptLevel() != CodeGenOptLevel::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());

    addPass(createRISCVGatherScatterLoweringPass());
    addPass(createInterleavedAccessPass());
    addPass(createRISCVCodeGenPreparePass());
  }

  TargetPassConfig::addIRPasses();
}

bool RISCVPassConfig::addPreISel() {
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    // Add a barrier before instruction selection so that we will not get
    // deleted block address after enabling default outlining. See D99707 for
    // more details.
    addPass(createBarrierNoopPass());
  }

  if (EnableGlobalMerge == cl::BOU_TRUE) {
    addPass(createGlobalMergePass(TM, /* MaxOffset */ 2047,
                                  /* OnlyOptimizeForSize */ false,
                                  /* MergeExternalByDefault */ true));
  }

  return false;
}

void RISCVPassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createTypePromotionLegacyPass());
  TargetPassConfig::addCodeGenPrepare();
}

bool RISCVPassConfig::addInstSelector() {
  addPass(createRISCVISelDag(getRISCVTargetMachine(), getOptLevel()));

  return false;
}

bool RISCVPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void RISCVPassConfig::addPreLegalizeMachineIR() {
  if (getOptLevel() == CodeGenOptLevel::None) {
    addPass(createRISCVO0PreLegalizerCombiner());
  } else {
    addPass(createRISCVPreLegalizerCombiner());
  }
}

bool RISCVPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void RISCVPassConfig::addPreRegBankSelect() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createRISCVPostLegalizerCombiner());
}

bool RISCVPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool RISCVPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

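// The hooks from addIRTranslator() through addGlobalInstructionSelect() above
// make up the RISC-V GlobalISel pipeline; they only run when global
// instruction selection is requested (for example with `llc -global-isel`).
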
void RISCVPassConfig::addPreSched2() {
  addPass(createRISCVPostRAExpandPseudoPass());

  // Emit KCFI checks for indirect calls.
  addPass(createKCFIPass());
}

void RISCVPassConfig::addPreEmitPass() {
  // TODO: It would potentially be better to schedule copy propagation after
  // expanding pseudos (in addPreEmitPass2). However, performing copy
  // propagation after the machine outliner (which runs after addPreEmitPass)
  // currently leads to incorrect code-gen, where copies to registers within
  // outlined functions are removed erroneously.
  if (TM->getOptLevel() >= CodeGenOptLevel::Default &&
      EnableRISCVCopyPropagation)
    addPass(createMachineCopyPropagationPass(true));
  addPass(&BranchRelaxationPassID);
  addPass(createRISCVMakeCompressibleOptPass());
}

void RISCVPassConfig::addPreEmitPass2() {
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    addPass(createRISCVMoveMergePass());
    // Schedule the PushPop optimization before the expansion of pseudo
    // instructions, ensuring the return instruction is detected correctly.
    addPass(createRISCVPushPopOptimizationPass());
  }
  addPass(createRISCVExpandPseudoPass());

  // Schedule the expansion of AMOs at the last possible moment, avoiding the
  // possibility for other passes to break the requirements for forward
  // progress in the LR/SC block.
  addPass(createRISCVExpandAtomicPseudoPass());

  // KCFI indirect call checks are lowered to a bundle.
  addPass(createUnpackMachineBundles([&](const MachineFunction &MF) {
    return MF.getFunction().getParent()->getModuleFlag("kcfi");
  }));
}

void RISCVPassConfig::addMachineSSAOptimization() {
  addPass(createRISCVVectorPeepholePass());

  TargetPassConfig::addMachineSSAOptimization();

  if (EnableMachineCombiner)
    addPass(&MachineCombinerID);

  if (TM->getTargetTriple().isRISCV64()) {
    addPass(createRISCVOptWInstrsPass());
  }
}

void RISCVPassConfig::addPreRegAlloc() {
  addPass(createRISCVPreRAExpandPseudoPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createRISCVMergeBaseOffsetOptPass());

  addPass(createRISCVInsertReadWriteCSRPass());
  addPass(createRISCVInsertWriteVXRMPass());

  // Run RISCVInsertVSETVLI after PHI elimination. On O1 and above do it after
  // register coalescing so needVSETVLIPHI doesn't need to look through COPYs.
  if (!EnableVSETVLIAfterRVVRegAlloc) {
    if (TM->getOptLevel() == CodeGenOptLevel::None)
      insertPass(&PHIEliminationID, &RISCVInsertVSETVLIID);
    else
      insertPass(&RegisterCoalescerID, &RISCVInsertVSETVLIID);
  }
}

void RISCVPassConfig::addFastRegAlloc() {
  addPass(&InitUndefID);
  TargetPassConfig::addFastRegAlloc();
}

void RISCVPassConfig::addPostRegAlloc() {
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRedundantCopyElimination)
    addPass(createRISCVRedundantCopyEliminationPass());
}

void RISCVTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
  PB.registerLateLoopOptimizationsEPCallback([=](LoopPassManager &LPM,
                                                 OptimizationLevel Level) {
    LPM.addPass(LoopIdiomVectorizePass(LoopIdiomVectorizeStyle::Predicated));
  });
}

yaml::MachineFunctionInfo *
RISCVTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::RISCVMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
RISCVTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<RISCVMachineFunctionInfo>();
  return new yaml::RISCVMachineFunctionInfo(*MFI);
}

bool RISCVTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI =
      static_cast<const yaml::RISCVMachineFunctionInfo &>(MFI);
  PFS.MF.getInfo<RISCVMachineFunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}