//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "tti"

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));

static cl::opt<unsigned> CacheLineSize(
    "cache-line-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target-specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace

bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
  // If the loop has irreducible control flow, it cannot be converted to a
  // hardware loop.
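  // The reverse post-order traversal below feeds containsIrreducibleCFG,
  // which reports cycles that are not dominated by a single header block.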
  LoopBlocksRPO RPOT(L);
  RPOT.perform(&LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
    return false;
  return true;
}

IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
    bool TypeBasedOnly)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost) {

  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  if (!TypeBasedOnly)
    Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
                                                 ArrayRef<const Value *> Args)
    : RetTy(Ty), IID(Id) {

  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());
  for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
    ParamTys.push_back(Arguments[Idx]->getType());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
}

bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
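    // NotAlways is set if some in-loop predecessor of the header is not
    // dominated by this exiting block, i.e. the block may be skipped on
    // some iterations.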
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    ExitCount = EC;
    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}

TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::~TargetTransformInfo() = default;

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}

unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

InstructionCost
TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
                                ArrayRef<const Value *> Operands,
                                TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

InstructionCost
TargetTransformInfo::getInstructionCost(const User *U,
                                        ArrayRef<const Value *> Operands,
                                        enum TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getInstructionCost(U, Operands, CostKind);
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}

BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return TTIImpl->getPredictableBranchThreshold();
}

bool TargetTransformInfo::hasBranchDivergence() const {
  return TTIImpl->hasBranchDivergence();
}

bool TargetTransformInfo::useGPUDivergenceAnalysis() const {
  return TTIImpl->useGPUDivergenceAnalysis();
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

bool TargetTransformInfo::canHaveNonUndefGlobalInitializerInAddressSpace(
    unsigned AS) const {
  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

bool TargetTransformInfo::isSingleThreaded() const {
  return TTIImpl->isSingleThreaded();
}

std::pair<const Value *, unsigned>
TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const {
  return TTIImpl->getPredicatedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}

bool TargetTransformInfo::preferPredicateOverEpilogue(
    Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *TLI, DominatorTree *DT, LoopVectorizationLegality *LVL,
    InterleavedAccessInfo *IAI) const {
  return TTIImpl->preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LVL, IAI);
}

PredicationStyle TargetTransformInfo::emitGetActiveLaneMask() const {
  return TTIImpl->emitGetActiveLaneMask();
}

std::optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I);
}

bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
                                        const LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
                                             Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
                                            Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalBroadcastLoad(Type *ElementTy,
                                               ElementCount NumElements) const {
  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalAltInstr(
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
    const SmallBitVector &OpcodeMask) const {
  return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedGather(VectorType *DataType,
                                                     Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedScatter(VectorType *DataType,
                                                      Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType);
}

bool TargetTransformInfo::enableOrderedReductions() const {
  return TTIImpl->enableOrderedReductions();
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

InstructionCost TargetTransformInfo::getScalingFactorCost(
    Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  InstructionCost Cost = TTIImpl->getScalingFactorCost(
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::shouldBuildRelLookupTables() const {
  return TTIImpl->shouldBuildRelLookupTables();
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}

InstructionCost TargetTransformInfo::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                           CostKind);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys, CostKind);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::supportsTailCalls() const {
  return TTIImpl->supportsTailCalls();
}

bool TargetTransformInfo::supportsTailCallFor(const CallBase *CB) const {
  return TTIImpl->supportsTailCallFor(CB);
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableSelectOptimize() const {
  return TTIImpl->enableSelectOptimize();
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool
TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                    unsigned BitWidth,
                                                    unsigned AddressSpace,
                                                    Align Alignment,
                                                    unsigned *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isExpensiveToSpeculativelyExecute(
    const Instruction *I) const {
  return TTIImpl->isExpensiveToSpeculativelyExecute(I);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
                                                           unsigned Idx,
                                                           const APInt &Imm,
                                                           Type *Ty) const {
  InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                   TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCostInst(
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
    TTI::TargetCostKind CostKind, Instruction *Inst) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

TypeSize TargetTransformInfo::getRegisterBitWidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->getRegisterBitWidth(K);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

std::optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
  return TTIImpl->getVScaleForTuning();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->shouldMaximizeVectorBandwidth(K);
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                                                Type *ScalarValTy) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

unsigned TargetTransformInfo::getCacheLineSize() const {
  return CacheLineSize.getNumOccurrences() > 0 ? CacheLineSize
                                               : TTIImpl->getCacheLineSize();
}

std::optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

std::optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

bool TargetTransformInfo::shouldPrefetchAddressSpace(unsigned AS) const {
  return TTIImpl->shouldPrefetchAddressSpace(AS);
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}

TargetTransformInfo::OperandValueInfo
TargetTransformInfo::getOperandInfo(const Value *V) {
  OperandValueKind OpInfo = OK_AnyValue;
  OperandValueProperties OpProps = OP_None;

  if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getValue().isPowerOf2())
        OpProps = OP_PowerOf2;
      else if (CI->getValue().isNegatedPowerOf2())
        OpProps = OP_NegatedPowerOf2;
    }
    return {OK_UniformConstantValue, OpProps};
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non-uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat)) {
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
        else if (CI->getValue().isNegatedPowerOf2())
          OpProps = OP_NegatedPowerOf2;
      }
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      bool AllPow2 = true, AllNegPow2 = true;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I))) {
          AllPow2 &= CI->getValue().isPowerOf2();
          AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
          if (AllPow2 || AllNegPow2)
            continue;
        }
        AllPow2 = AllNegPow2 = false;
        break;
      }
      OpProps = AllPow2 ? OP_PowerOf2 : OpProps;
      OpProps = AllNegPow2 ? OP_NegatedPowerOf2 : OpProps;
    }
  }

  // Check for a splat of a uniform value. This is not loop aware, so return
  // true only for the obviously uniform cases (argument, globalvalue).
  if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
    OpInfo = OK_UniformValue;

  return {OpInfo, OpProps};
}

InstructionCost TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueInfo Op1Info, OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind,
                                      Op1Info, Op2Info,
                                      Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getShuffleCost(
    ShuffleKind Kind, VectorType *Ty, ArrayRef<int> Mask,
    TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
    ArrayRef<const Value *> Args) const {
  InstructionCost Cost =
      TTIImpl->getShuffleCost(Kind, Ty, Mask, CostKind, Index, SubTp, Args);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return TTI::CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return TTI::CastContextHint::GatherScatter;
    }

    return TTI::CastContextHint::None;
  };

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    return CastContextHint::None;
  }

  return TTI::CastContextHint::None;
}

InstructionCost TargetTransformInfo::getCastInstrCost(
    unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtractWithExtendCost(
    unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
  InstructionCost Cost =
      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCFInstrCost(
    unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getVectorInstrCost(
    unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
    Value *Op0, Value *Op1) const {
  // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
  // This is mentioned in the interface description and respected by all
  // callers, but never asserted upon.
  InstructionCost Cost =
      TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getVectorInstrCost(const Instruction &I, Type *Val,
                                        TTI::TargetCostKind CostKind,
                                        unsigned Index) const {
  // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
  // This is mentioned in the interface description and respected by all
  // callers, but never asserted upon.
  InstructionCost Cost = TTIImpl->getVectorInstrCost(I, Val, CostKind, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getReplicationShuffleCost(
    Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
    TTI::TargetCostKind CostKind) {
  InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
      EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
    const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getMemoryOpCost(
      Opcode, Src, Alignment, AddressSpace, CostKind, OpInfo, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
                                                        AddressSpace, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getGatherScatterOpCost(
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
      Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
                                      ArrayRef<Type *> Tys,
                                      TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}

InstructionCost
TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
                                               const SCEV *Ptr) const {
  InstructionCost Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getMemcpyCost(I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getArithmeticReductionCost(
    unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMinMaxReductionCost(
    VectorType *Ty, VectorType *CondTy, bool IsUnsigned,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtendedReductionCost(
    unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
    std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
                                           CostKind);
}

InstructionCost TargetTransformInfo::getMulAccReductionCost(
    bool IsUnsigned, Type *ResTy, VectorType *Ty,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getMulAccReductionCost(IsUnsigned, ResTy, Ty, CostKind);
}

InstructionCost
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}

bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}

unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}

Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}

Type *TargetTransformInfo::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign,
                                            AtomicElementSize);
}

void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign,
    std::optional<uint32_t> AtomicCpySize) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(
      OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
      DestAlign, AtomicCpySize);
}

bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}

bool TargetTransformInfo::areTypesABICompatible(
    const Function *Caller, const Function *Callee,
    const ArrayRef<Type *> &Types) const {
  return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
}

bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}

bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}

unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}

bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}

bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}

bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
}

bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
}

unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}

unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}

bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                ReductionFlags Flags) const {
  return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
  return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferEpilogueVectorization() const {
  return TTIImpl->preferEpilogueVectorization();
}

TargetTransformInfo::VPLegalization
TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  return TTIImpl->getVPLegalizationStrategy(VPI);
}

bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}

unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
  return TTIImpl->getGISelRematGlobalCost();
}

unsigned TargetTransformInfo::getMinTripCountTailFoldingThreshold() const {
  return TTIImpl->getMinTripCountTailFoldingThreshold();
}

bool TargetTransformInfo::supportsScalableVectors() const {
  return TTIImpl->supportsScalableVectors();
}

bool TargetTransformInfo::enableScalableVectorization() const {
  return TTIImpl->enableScalableVectorization();
}

bool TargetTransformInfo::hasActiveVectorLength(unsigned Opcode, Type *DataType,
                                                Align Alignment) const {
  return TTIImpl->hasActiveVectorLength(Opcode, DataType, Alignment);
}

TargetTransformInfo::Concept::~Concept() = default;

TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}

TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  return TTICallback(F);
}

AnalysisKey TargetIRAnalysis::Key;

TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getParent()->getDataLayout());
}

// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}

ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
  return new TargetTransformInfoWrapperPass(std::move(TIRA));
}