//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "tti"

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));

static cl::opt<unsigned> CacheLineSize(
    "cache-line-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target-specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace

bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
  // If the loop has irreducible control flow, it cannot be converted to a
  // hardware loop.
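  // (containsIrreducibleCFG consumes the reverse-post-order walk built below
  // and reports any cycle that is entered through more than one block, i.e. a
  // cycle with no unique header.)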
  LoopBlocksRPO RPOT(L);
  RPOT.perform(&LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
    return false;
  return true;
}

IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
    bool TypeBasedOnly)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost) {

  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  if (!TypeBasedOnly)
    Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
                                                 ArrayRef<const Value *> Args)
    : RetTy(Ty), IID(Id) {

  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());
  for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
    ParamTys.push_back(Arguments[Idx]->getType());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
}

bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
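    // (For a rotated loop with a single latch this reduces to checking that
    // BB dominates the latch.)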
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    ExitCount = EC;
    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}

TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::~TargetTransformInfo() = default;

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}

unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

InstructionCost
TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
                                ArrayRef<const Value *> Operands,
                                TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

InstructionCost
TargetTransformInfo::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands,
                                 enum TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getUserCost(U, Operands, CostKind);
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}

BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return TTIImpl->getPredictableBranchThreshold();
}

bool TargetTransformInfo::hasBranchDivergence() const {
  return TTIImpl->hasBranchDivergence();
}

bool TargetTransformInfo::useGPUDivergenceAnalysis() const {
  return TTIImpl->useGPUDivergenceAnalysis();
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

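// Note: the flat address-space hooks in this area (getFlatAddressSpace and
// collectFlatAddressOperands above, the addrspacecast queries below) are
// consumed primarily by the InferAddressSpaces pass, which rewrites generic
// pointers into more specific address spaces when that can be proven safe.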
bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

bool TargetTransformInfo::canHaveNonUndefGlobalInitializerInAddressSpace(
    unsigned AS) const {
  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

std::pair<const Value *, unsigned>
TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const {
  return TTIImpl->getPredicatedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}

bool TargetTransformInfo::preferPredicateOverEpilogue(
    Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *TLI, DominatorTree *DT,
    LoopVectorizationLegality *LVL) const {
  return TTIImpl->preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LVL);
}

PredicationStyle TargetTransformInfo::emitGetActiveLaneMask() const {
  return TTIImpl->emitGetActiveLaneMask();
}

Optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

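// The addressing-mode queries model an address expression of the form
//   BaseGV + BaseOffset + (HasBaseReg ? %base : 0) + Scale * %index
// For example, an x86 operand like [%base + 4*%index + 16] corresponds to
// HasBaseReg == true, Scale == 4 and BaseOffset == 16.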
bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I);
}

bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
                                        const LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
                                             Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
                                            Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalBroadcastLoad(Type *ElementTy,
                                               ElementCount NumElements) const {
  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalAltInstr(
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
    const SmallBitVector &OpcodeMask) const {
  return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedGather(VectorType *DataType,
                                                     Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedScatter(VectorType *DataType,
                                                      Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType);
}

bool TargetTransformInfo::enableOrderedReductions() const {
  return TTIImpl->enableOrderedReductions();
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

InstructionCost TargetTransformInfo::getScalingFactorCost(
    Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  InstructionCost Cost = TTIImpl->getScalingFactorCost(
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::shouldBuildRelLookupTables() const {
  return TTIImpl->shouldBuildRelLookupTables();
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}

InstructionCost
TargetTransformInfo::getScalarizationOverhead(VectorType *Ty,
                                              const APInt &DemandedElts,
                                              bool Insert, bool Extract) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::supportsTailCalls() const {
  return TTIImpl->supportsTailCalls();
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                         unsigned BitWidth,
                                                         unsigned AddressSpace,
                                                         Align Alignment,
                                                         bool *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

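// For example, the x86 implementation of getPopcntSupport reports
// PSK_FastHardware when the subtarget has the POPCNT instruction and
// PSK_Software otherwise.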
TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
                                                           unsigned Idx,
                                                           const APInt &Imm,
                                                           Type *Ty) const {
  InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                   TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCostInst(
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
    TTI::TargetCostKind CostKind, Instruction *Inst) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

TypeSize TargetTransformInfo::getRegisterBitWidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->getRegisterBitWidth(K);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

Optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

Optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
  return TTIImpl->getVScaleForTuning();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->shouldMaximizeVectorBandwidth(K);
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                                                Type *ScalarValTy) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

unsigned TargetTransformInfo::getCacheLineSize() const {
  return CacheLineSize.getNumOccurrences() > 0 ? CacheLineSize
                                               : TTIImpl->getCacheLineSize();
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}

TargetTransformInfo::OperandValueKind
TargetTransformInfo::getOperandInfo(const Value *V,
                                    OperandValueProperties &OpProps) {
  OperandValueKind OpInfo = OK_AnyValue;
  OpProps = OP_None;

  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().isPowerOf2())
      OpProps = OP_PowerOf2;
    return OK_UniformConstantValue;
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non-uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat))
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      OpProps = OP_PowerOf2;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I)))
          if (CI->getValue().isPowerOf2())
            continue;
        OpProps = OP_None;
        break;
      }
    }
  }

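  // For example: the splat <2 x i32> <i32 4, i32 4> is classified as
  // OK_UniformConstantValue with OP_PowerOf2, while <2 x i32> <i32 3, i32 5>
  // is OK_NonUniformConstantValue with OP_None.
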
  // Check for a splat of a uniform value. This is not loop aware, so we
  // classify the operand as uniform only in the obviously uniform cases
  // (argument, global value).
  if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
    OpInfo = OK_UniformValue;

  return OpInfo;
}

InstructionCost TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueKind Opd1Info, OperandValueKind Opd2Info,
    OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                      Opd1PropInfo, Opd2PropInfo, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getShuffleCost(
    ShuffleKind Kind, VectorType *Ty, ArrayRef<int> Mask, int Index,
    VectorType *SubTp, ArrayRef<const Value *> Args) const {
  InstructionCost Cost =
      TTIImpl->getShuffleCost(Kind, Ty, Mask, Index, SubTp, Args);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return TTI::CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return TTI::CastContextHint::GatherScatter;
    }

    return TTI::CastContextHint::None;
  };

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    return CastContextHint::None;
  }

  return TTI::CastContextHint::None;
}

InstructionCost TargetTransformInfo::getCastInstrCost(
    unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtractWithExtendCost(
    unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
  InstructionCost Cost =
      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCFInstrCost(
    unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getVectorInstrCost(unsigned Opcode,
                                                        Type *Val,
                                                        unsigned Index) const {
  InstructionCost Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getReplicationShuffleCost(
    Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
    TTI::TargetCostKind CostKind) {
  InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
      EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment,
                                                  AddressSpace, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
                                                        AddressSpace, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getGatherScatterOpCost(
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
      Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
                                      ArrayRef<Type *> Tys,
                                      TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}

InstructionCost
TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
                                               const SCEV *Ptr) const {
  InstructionCost Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getMemcpyCost(I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getArithmeticReductionCost(
    unsigned Opcode, VectorType *Ty, Optional<FastMathFlags> FMF,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMinMaxReductionCost(
    VectorType *Ty, VectorType *CondTy, bool IsUnsigned,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtendedAddReductionCost(
    bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
                                              CostKind);
}

InstructionCost
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}

bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}

unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}

Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}

Type *TargetTransformInfo::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
    Optional<uint32_t> AtomicElementSize) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign,
                                            AtomicElementSize);
}

void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign,
    Optional<uint32_t> AtomicCpySize) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(
      OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
      DestAlign, AtomicCpySize);
}

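// Note: the default implementation of areInlineCompatible (see
// TargetTransformInfoImpl.h) simply requires the caller's and callee's
// "target-cpu" and "target-features" attributes to match; targets typically
// override it to also permit inlining when the callee's feature set is a
// subset of the caller's.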
bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}

bool TargetTransformInfo::areTypesABICompatible(
    const Function *Caller, const Function *Callee,
    const ArrayRef<Type *> &Types) const {
  return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
}

bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}

bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}

unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}

bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}

bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}

bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
}

bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
}

unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}

unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}

bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                ReductionFlags Flags) const {
  return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
  return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
}

TargetTransformInfo::VPLegalization
TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  return TTIImpl->getVPLegalizationStrategy(VPI);
}

bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}

unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
  return TTIImpl->getGISelRematGlobalCost();
}

bool TargetTransformInfo::supportsScalableVectors() const {
  return TTIImpl->supportsScalableVectors();
}

bool TargetTransformInfo::enableScalableVectorization() const {
  return TTIImpl->enableScalableVectorization();
}

bool TargetTransformInfo::hasActiveVectorLength(unsigned Opcode, Type *DataType,
                                                Align Alignment) const {
  return TTIImpl->hasActiveVectorLength(Opcode, DataType, Alignment);
}

InstructionCost
TargetTransformInfo::getInstructionLatency(const Instruction *I) const {
  return TTIImpl->getInstructionLatency(I);
}

InstructionCost
TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
  case Instruction::Ret:
  case Instruction::PHI:
  case Instruction::Br:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FNeg:
  case Instruction::Select:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Store:
  case Instruction::Load:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
  case Instruction::ExtractValue:
  case Instruction::ShuffleVector:
  case Instruction::Call:
  case Instruction::Switch:
    return getUserCost(I, CostKind);
  default:
    // We don't have any information on this instruction.
    return -1;
  }
}

TargetTransformInfo::Concept::~Concept() = default;

TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}

TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  return TTICallback(F);
}

AnalysisKey TargetIRAnalysis::Key;

TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getParent()->getDataLayout());
}

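// A TargetMachine normally supplies the TTICallback via
// TargetMachine::getTargetIRAnalysis(); the default above (NoTTIImpl wrapped
// in Result) is only used when no target machine is available and answers
// every query with conservative defaults.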
// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}

ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
  return new TargetTransformInfoWrapperPass(std::move(TIRA));
}