xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp (revision 0ad011ececb978e22a9bff2acf76633b094f1ff6)
1 //===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "llvm/Analysis/TargetTransformInfo.h"
10 #include "llvm/Analysis/CFG.h"
11 #include "llvm/Analysis/LoopIterator.h"
12 #include "llvm/Analysis/TargetTransformInfoImpl.h"
13 #include "llvm/IR/CFG.h"
14 #include "llvm/IR/Dominators.h"
15 #include "llvm/IR/Instruction.h"
16 #include "llvm/IR/Instructions.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/Module.h"
19 #include "llvm/IR/Operator.h"
20 #include "llvm/IR/PatternMatch.h"
21 #include "llvm/InitializePasses.h"
22 #include "llvm/Support/CommandLine.h"
23 #include <optional>
24 #include <utility>
25 
26 using namespace llvm;
27 using namespace PatternMatch;
28 
29 #define DEBUG_TYPE "tti"
30 
// Hidden flag: recognize reduction patterns when computing costs.
static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));

// Command-line override for the target's cache line size. Only honored when
// the flag actually appears on the command line (see getCacheLineSize()).
static cl::opt<unsigned> CacheLineSize(
    "cache-line-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

// Command-line override (in percent) for the target's predictable-branch
// threshold. Only honored when explicitly given (see
// getPredictableBranchThreshold()).
static cl::opt<unsigned> PredictableBranchThreshold(
    "predictable-branch-threshold", cl::init(99), cl::Hidden,
    cl::desc(
        "Use this to override the target's predictable branch threshold (%)."));
44 
namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available. It carries
/// only the DataLayout; every query falls through to the CRTP base's
/// conservative defaults.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace
55 
56 bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
57   // If the loop has irreducible control flow, it can not be converted to
58   // Hardware loop.
59   LoopBlocksRPO RPOT(L);
60   RPOT.perform(&LI);
61   if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
62     return false;
63   return true;
64 }
65 
66 IntrinsicCostAttributes::IntrinsicCostAttributes(
67     Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
68     bool TypeBasedOnly)
69     : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
70       ScalarizationCost(ScalarizationCost) {
71 
72   if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
73     FMF = FPMO->getFastMathFlags();
74 
75   if (!TypeBasedOnly)
76     Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
77   FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
78   ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
79 }
80 
81 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
82                                                  ArrayRef<Type *> Tys,
83                                                  FastMathFlags Flags,
84                                                  const IntrinsicInst *I,
85                                                  InstructionCost ScalarCost)
86     : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
87   ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
88 }
89 
90 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
91                                                  ArrayRef<const Value *> Args)
92     : RetTy(Ty), IID(Id) {
93 
94   Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
95   ParamTys.reserve(Arguments.size());
96   for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
97     ParamTys.push_back(Arguments[Idx]->getType());
98 }
99 
100 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
101                                                  ArrayRef<const Value *> Args,
102                                                  ArrayRef<Type *> Tys,
103                                                  FastMathFlags Flags,
104                                                  const IntrinsicInst *I,
105                                                  InstructionCost ScalarCost)
106     : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
107   ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
108   Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
109 }
110 
/// Initialize a hardware-loop description for \p L with a 32-bit counter that
/// is decremented by one each iteration.
HardwareLoopInfo::HardwareLoopInfo(Loop *L) : L(L) {
  // Match default options:
  // - hardware-loop-counter-bitwidth = 32
  // - hardware-loop-decrement = 1
  CountType = Type::getInt32Ty(L->getHeader()->getContext());
  LoopDecrement = ConstantInt::get(CountType, 1);
}
118 
/// Decide whether the loop can be turned into a hardware loop, and if so
/// record the chosen exit in this struct.
///
/// Scans the exiting blocks for one with a loop-invariant, non-zero exit
/// count that fits in CountType, that runs on every iteration, and that ends
/// in a conditional branch. On success sets ExitBlock, ExitBranch and
/// ExitCount and returns true; returns false if no suitable exit exists.
bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    // The exit count must be computable, loop-invariant, and not the
    // constant zero.
    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    // The count must fit in the hardware loop counter type (CountType).
    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this blocks ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    ExitCount = EC;
    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}
197 
/// Construct with the no-op implementation; used when no target-specific
/// information is available.
TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::~TargetTransformInfo() = default;

// Move operations transfer ownership of the type-erased implementation.
TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}
210 
// --- Inlining and generic cost queries: thin forwards to the target
// --- implementation held in TTIImpl.

unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

unsigned TargetTransformInfo::getCallerAllocaCost(const CallBase *CB,
                                                  const AllocaInst *AI) const {
  return TTIImpl->getCallerAllocaCost(CB, AI);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

InstructionCost TargetTransformInfo::getGEPCost(
    Type *PointeeType, const Value *Ptr, ArrayRef<const Value *> Operands,
    Type *AccessType, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
}

InstructionCost TargetTransformInfo::getPointersChainCost(
    ArrayRef<const Value *> Ptrs, const Value *Base,
    const TTI::PointersChainInfo &Info, Type *AccessTy,
    TTI::TargetCostKind CostKind) const {
  assert((Base || !Info.isSameBase()) &&
         "If pointers have same base address it has to be provided.");
  return TTIImpl->getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

InstructionCost
TargetTransformInfo::getInstructionCost(const User *U,
                                        ArrayRef<const Value *> Operands,
                                        enum TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getInstructionCost(U, Operands, CostKind);
  // Only reciprocal-throughput costs may be negative; every other cost kind
  // must be non-negative.
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}
259 
260 BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
261   return PredictableBranchThreshold.getNumOccurrences() > 0
262              ? BranchProbability(PredictableBranchThreshold, 100)
263              : TTIImpl->getPredictableBranchThreshold();
264 }
265 
// --- Divergence and address-space queries: thin forwards to TTIImpl.

bool TargetTransformInfo::hasBranchDivergence(const Function *F) const {
  return TTIImpl->hasBranchDivergence(F);
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

bool llvm::TargetTransformInfo::isValidAddrSpaceCast(unsigned FromAS,
                                                     unsigned ToAS) const {
  return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);
}

bool llvm::TargetTransformInfo::addrspacesMayAlias(unsigned FromAS,
                                                   unsigned ToAS) const {
  return TTIImpl->addrspacesMayAlias(FromAS, ToAS);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

bool TargetTransformInfo::canHaveNonUndefGlobalInitializerInAddressSpace(
    unsigned AS) const {
  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

bool TargetTransformInfo::isSingleThreaded() const {
  return TTIImpl->isSingleThreaded();
}

std::pair<const Value *, unsigned>
TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const {
  return TTIImpl->getPredicatedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}
334 
// --- Loop-transform and InstCombine hooks: thin forwards to TTIImpl.

bool TargetTransformInfo::preferPredicateOverEpilogue(
    TailFoldingInfo *TFI) const {
  return TTIImpl->preferPredicateOverEpilogue(TFI);
}

TailFoldingStyle TargetTransformInfo::getPreferredTailFoldingStyle(
    bool IVUpdateMayOverflow) const {
  return TTIImpl->getPreferredTailFoldingStyle(IVUpdateMayOverflow);
}

std::optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}
378 
// --- Immediate legality, addressing-mode and LSR queries: thin forwards to
// --- TTIImpl.

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I);
}

bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
                                        const LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}
425 
// --- Memory-operation legality queries (masked/non-temporal/gather-scatter):
// --- thin forwards to TTIImpl.

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
                                             Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
                                            Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalBroadcastLoad(Type *ElementTy,
                                               ElementCount NumElements) const {
  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalAltInstr(
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
    const SmallBitVector &OpcodeMask) const {
  return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedGather(VectorType *DataType,
                                                     Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedScatter(VectorType *DataType,
                                                      Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType);
}

bool TargetTransformInfo::enableOrderedReductions() const {
  return TTIImpl->enableOrderedReductions();
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}
500 
/// Forward to the target and sanity-check that the returned cost is
/// non-negative.
InstructionCost TargetTransformInfo::getScalingFactorCost(
    Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  InstructionCost Cost = TTIImpl->getScalingFactorCost(
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
509 
// --- Miscellaneous target-capability queries: thin forwards to TTIImpl.

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::shouldBuildRelLookupTables() const {
  return TTIImpl->shouldBuildRelLookupTables();
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}
548 
// --- Scalarization overhead and feature-enable queries: thin forwards to
// --- TTIImpl.

InstructionCost TargetTransformInfo::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                           CostKind);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys, CostKind);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::supportsTailCalls() const {
  return TTIImpl->supportsTailCalls();
}

bool TargetTransformInfo::supportsTailCallFor(const CallBase *CB) const {
  return TTIImpl->supportsTailCallFor(CB);
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableSelectOptimize() const {
  return TTIImpl->enableSelectOptimize();
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool
TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                    unsigned BitWidth,
                                                    unsigned AddressSpace,
                                                    Align Alignment,
                                                    unsigned *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}
614 
// --- FP and immediate cost queries. The cost-returning wrappers additionally
// --- assert that the target never reports a negative cost.

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isExpensiveToSpeculativelyExecute(
    const Instruction *I) const {
  return TTIImpl->isExpensiveToSpeculativelyExecute(I);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
                                                           unsigned Idx,
                                                           const APInt &Imm,
                                                           Type *Ty) const {
  InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                   TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCostInst(
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
    TTI::TargetCostKind CostKind, Instruction *Inst) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
669 
// --- Register-file and vectorization-factor queries: thin forwards to
// --- TTIImpl.

unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

TypeSize TargetTransformInfo::getRegisterBitWidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->getRegisterBitWidth(K);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

std::optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
  return TTIImpl->getVScaleForTuning();
}

bool TargetTransformInfo::isVScaleKnownToBeAPowerOfTwo() const {
  return TTIImpl->isVScaleKnownToBeAPowerOfTwo();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->shouldMaximizeVectorBandwidth(K);
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                                                Type *ScalarValTy) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}
729 
730 unsigned TargetTransformInfo::getCacheLineSize() const {
731   return CacheLineSize.getNumOccurrences() > 0 ? CacheLineSize
732                                                : TTIImpl->getCacheLineSize();
733 }
734 
// --- Cache and prefetch queries: thin forwards to TTIImpl.

std::optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

std::optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

bool TargetTransformInfo::shouldPrefetchAddressSpace(unsigned AS) const {
  return TTIImpl->shouldPrefetchAddressSpace(AS);
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(ElementCount VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}
771 
/// Classify operand \p V for costing: is it uniform and/or constant, and are
/// the constant element(s) (negated) powers of two? Static helper — does not
/// consult the target implementation.
TargetTransformInfo::OperandValueInfo
TargetTransformInfo::getOperandInfo(const Value *V) {
  OperandValueKind OpInfo = OK_AnyValue;
  OperandValueProperties OpProps = OP_None;

  // A scalar int/FP constant is a uniform constant by definition.
  if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getValue().isPowerOf2())
        OpProps = OP_PowerOf2;
      else if (CI->getValue().isNegatedPowerOf2())
        OpProps = OP_NegatedPowerOf2;
    }
    return {OK_UniformConstantValue, OpProps};
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat)) {
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
        else if (CI->getValue().isNegatedPowerOf2())
          OpProps = OP_NegatedPowerOf2;
      }
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      // A property applies to the vector only if every element has it; stop
      // scanning as soon as both possibilities are ruled out.
      bool AllPow2 = true, AllNegPow2 = true;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I))) {
          AllPow2 &= CI->getValue().isPowerOf2();
          AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
          if (AllPow2 || AllNegPow2)
            continue;
        }
        AllPow2 = AllNegPow2 = false;
        break;
      }
      OpProps = AllPow2 ? OP_PowerOf2 : OpProps;
      OpProps = AllNegPow2 ? OP_NegatedPowerOf2 : OpProps;
    }
  }

  // Check for a splat of a uniform value. This is not loop aware, so return
  // true only for the obviously uniform cases (argument, globalvalue)
  if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
    OpInfo = OK_UniformValue;

  return {OpInfo, OpProps};
}
832 
833 InstructionCost TargetTransformInfo::getArithmeticInstrCost(
834     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
835     OperandValueInfo Op1Info, OperandValueInfo Op2Info,
836     ArrayRef<const Value *> Args, const Instruction *CxtI) const {
837   InstructionCost Cost =
838       TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind,
839                                       Op1Info, Op2Info,
840                                       Args, CxtI);
841   assert(Cost >= 0 && "TTI should not produce negative costs!");
842   return Cost;
843 }
844 
845 InstructionCost TargetTransformInfo::getShuffleCost(
846     ShuffleKind Kind, VectorType *Ty, ArrayRef<int> Mask,
847     TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
848     ArrayRef<const Value *> Args) const {
849   InstructionCost Cost =
850       TTIImpl->getShuffleCost(Kind, Ty, Mask, CostKind, Index, SubTp, Args);
851   assert(Cost >= 0 && "TTI should not produce negative costs!");
852   return Cost;
853 }
854 
855 TTI::CastContextHint
856 TargetTransformInfo::getCastContextHint(const Instruction *I) {
857   if (!I)
858     return CastContextHint::None;
859 
860   auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
861                              unsigned GatScatOp) {
862     const Instruction *I = dyn_cast<Instruction>(V);
863     if (!I)
864       return CastContextHint::None;
865 
866     if (I->getOpcode() == LdStOp)
867       return CastContextHint::Normal;
868 
869     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
870       if (II->getIntrinsicID() == MaskedOp)
871         return TTI::CastContextHint::Masked;
872       if (II->getIntrinsicID() == GatScatOp)
873         return TTI::CastContextHint::GatherScatter;
874     }
875 
876     return TTI::CastContextHint::None;
877   };
878 
879   switch (I->getOpcode()) {
880   case Instruction::ZExt:
881   case Instruction::SExt:
882   case Instruction::FPExt:
883     return getLoadStoreKind(I->getOperand(0), Instruction::Load,
884                             Intrinsic::masked_load, Intrinsic::masked_gather);
885   case Instruction::Trunc:
886   case Instruction::FPTrunc:
887     if (I->hasOneUse())
888       return getLoadStoreKind(*I->user_begin(), Instruction::Store,
889                               Intrinsic::masked_store,
890                               Intrinsic::masked_scatter);
891     break;
892   default:
893     return CastContextHint::None;
894   }
895 
896   return TTI::CastContextHint::None;
897 }
898 
899 InstructionCost TargetTransformInfo::getCastInstrCost(
900     unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
901     TTI::TargetCostKind CostKind, const Instruction *I) const {
902   assert((I == nullptr || I->getOpcode() == Opcode) &&
903          "Opcode should reflect passed instruction.");
904   InstructionCost Cost =
905       TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
906   assert(Cost >= 0 && "TTI should not produce negative costs!");
907   return Cost;
908 }
909 
910 InstructionCost TargetTransformInfo::getExtractWithExtendCost(
911     unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
912   InstructionCost Cost =
913       TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
914   assert(Cost >= 0 && "TTI should not produce negative costs!");
915   return Cost;
916 }
917 
918 InstructionCost TargetTransformInfo::getCFInstrCost(
919     unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
920   assert((I == nullptr || I->getOpcode() == Opcode) &&
921          "Opcode should reflect passed instruction.");
922   InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
923   assert(Cost >= 0 && "TTI should not produce negative costs!");
924   return Cost;
925 }
926 
927 InstructionCost TargetTransformInfo::getCmpSelInstrCost(
928     unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
929     TTI::TargetCostKind CostKind, const Instruction *I) const {
930   assert((I == nullptr || I->getOpcode() == Opcode) &&
931          "Opcode should reflect passed instruction.");
932   InstructionCost Cost =
933       TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
934   assert(Cost >= 0 && "TTI should not produce negative costs!");
935   return Cost;
936 }
937 
938 InstructionCost TargetTransformInfo::getVectorInstrCost(
939     unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
940     Value *Op0, Value *Op1) const {
941   // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
942   // This is mentioned in the interface description and respected by all
943   // callers, but never asserted upon.
944   InstructionCost Cost =
945       TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
946   assert(Cost >= 0 && "TTI should not produce negative costs!");
947   return Cost;
948 }
949 
950 InstructionCost
951 TargetTransformInfo::getVectorInstrCost(const Instruction &I, Type *Val,
952                                         TTI::TargetCostKind CostKind,
953                                         unsigned Index) const {
954   // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
955   // This is mentioned in the interface description and respected by all
956   // callers, but never asserted upon.
957   InstructionCost Cost = TTIImpl->getVectorInstrCost(I, Val, CostKind, Index);
958   assert(Cost >= 0 && "TTI should not produce negative costs!");
959   return Cost;
960 }
961 
962 InstructionCost TargetTransformInfo::getReplicationShuffleCost(
963     Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
964     TTI::TargetCostKind CostKind) {
965   InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
966       EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
967   assert(Cost >= 0 && "TTI should not produce negative costs!");
968   return Cost;
969 }
970 
971 InstructionCost TargetTransformInfo::getMemoryOpCost(
972     unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
973     TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
974     const Instruction *I) const {
975   assert((I == nullptr || I->getOpcode() == Opcode) &&
976          "Opcode should reflect passed instruction.");
977   InstructionCost Cost = TTIImpl->getMemoryOpCost(
978       Opcode, Src, Alignment, AddressSpace, CostKind, OpInfo, I);
979   assert(Cost >= 0 && "TTI should not produce negative costs!");
980   return Cost;
981 }
982 
983 InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
984     unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
985     TTI::TargetCostKind CostKind) const {
986   InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
987                                                         AddressSpace, CostKind);
988   assert(Cost >= 0 && "TTI should not produce negative costs!");
989   return Cost;
990 }
991 
992 InstructionCost TargetTransformInfo::getGatherScatterOpCost(
993     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
994     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
995   InstructionCost Cost = TTIImpl->getGatherScatterOpCost(
996       Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
997   assert(Cost >= 0 && "TTI should not produce negative costs!");
998   return Cost;
999 }
1000 
1001 InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
1002     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1003     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1004     bool UseMaskForCond, bool UseMaskForGaps) const {
1005   InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
1006       Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
1007       UseMaskForCond, UseMaskForGaps);
1008   assert(Cost >= 0 && "TTI should not produce negative costs!");
1009   return Cost;
1010 }
1011 
1012 InstructionCost
1013 TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1014                                            TTI::TargetCostKind CostKind) const {
1015   InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
1016   assert(Cost >= 0 && "TTI should not produce negative costs!");
1017   return Cost;
1018 }
1019 
1020 InstructionCost
1021 TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
1022                                       ArrayRef<Type *> Tys,
1023                                       TTI::TargetCostKind CostKind) const {
1024   InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
1025   assert(Cost >= 0 && "TTI should not produce negative costs!");
1026   return Cost;
1027 }
1028 
1029 unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
1030   return TTIImpl->getNumberOfParts(Tp);
1031 }
1032 
1033 InstructionCost
1034 TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
1035                                                const SCEV *Ptr) const {
1036   InstructionCost Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
1037   assert(Cost >= 0 && "TTI should not produce negative costs!");
1038   return Cost;
1039 }
1040 
1041 InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
1042   InstructionCost Cost = TTIImpl->getMemcpyCost(I);
1043   assert(Cost >= 0 && "TTI should not produce negative costs!");
1044   return Cost;
1045 }
1046 
1047 uint64_t TargetTransformInfo::getMaxMemIntrinsicInlineSizeThreshold() const {
1048   return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();
1049 }
1050 
1051 InstructionCost TargetTransformInfo::getArithmeticReductionCost(
1052     unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
1053     TTI::TargetCostKind CostKind) const {
1054   InstructionCost Cost =
1055       TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
1056   assert(Cost >= 0 && "TTI should not produce negative costs!");
1057   return Cost;
1058 }
1059 
1060 InstructionCost TargetTransformInfo::getMinMaxReductionCost(
1061     Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
1062     TTI::TargetCostKind CostKind) const {
1063   InstructionCost Cost =
1064       TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);
1065   assert(Cost >= 0 && "TTI should not produce negative costs!");
1066   return Cost;
1067 }
1068 
1069 InstructionCost TargetTransformInfo::getExtendedReductionCost(
1070     unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1071     FastMathFlags FMF, TTI::TargetCostKind CostKind) const {
1072   return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
1073                                            CostKind);
1074 }
1075 
1076 InstructionCost TargetTransformInfo::getMulAccReductionCost(
1077     bool IsUnsigned, Type *ResTy, VectorType *Ty,
1078     TTI::TargetCostKind CostKind) const {
1079   return TTIImpl->getMulAccReductionCost(IsUnsigned, ResTy, Ty, CostKind);
1080 }
1081 
1082 InstructionCost
1083 TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
1084   return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
1085 }
1086 
1087 bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
1088                                              MemIntrinsicInfo &Info) const {
1089   return TTIImpl->getTgtMemIntrinsic(Inst, Info);
1090 }
1091 
1092 unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
1093   return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
1094 }
1095 
1096 Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
1097     IntrinsicInst *Inst, Type *ExpectedType) const {
1098   return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
1099 }
1100 
1101 Type *TargetTransformInfo::getMemcpyLoopLoweringType(
1102     LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
1103     unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
1104     std::optional<uint32_t> AtomicElementSize) const {
1105   return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
1106                                             DestAddrSpace, SrcAlign, DestAlign,
1107                                             AtomicElementSize);
1108 }
1109 
1110 void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
1111     SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1112     unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1113     unsigned SrcAlign, unsigned DestAlign,
1114     std::optional<uint32_t> AtomicCpySize) const {
1115   TTIImpl->getMemcpyLoopResidualLoweringType(
1116       OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
1117       DestAlign, AtomicCpySize);
1118 }
1119 
1120 bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
1121                                               const Function *Callee) const {
1122   return TTIImpl->areInlineCompatible(Caller, Callee);
1123 }
1124 
1125 bool TargetTransformInfo::areTypesABICompatible(
1126     const Function *Caller, const Function *Callee,
1127     const ArrayRef<Type *> &Types) const {
1128   return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
1129 }
1130 
1131 bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
1132                                              Type *Ty) const {
1133   return TTIImpl->isIndexedLoadLegal(Mode, Ty);
1134 }
1135 
1136 bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
1137                                               Type *Ty) const {
1138   return TTIImpl->isIndexedStoreLegal(Mode, Ty);
1139 }
1140 
1141 unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
1142   return TTIImpl->getLoadStoreVecRegBitWidth(AS);
1143 }
1144 
1145 bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
1146   return TTIImpl->isLegalToVectorizeLoad(LI);
1147 }
1148 
1149 bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
1150   return TTIImpl->isLegalToVectorizeStore(SI);
1151 }
1152 
1153 bool TargetTransformInfo::isLegalToVectorizeLoadChain(
1154     unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
1155   return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
1156                                               AddrSpace);
1157 }
1158 
1159 bool TargetTransformInfo::isLegalToVectorizeStoreChain(
1160     unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
1161   return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
1162                                                AddrSpace);
1163 }
1164 
1165 bool TargetTransformInfo::isLegalToVectorizeReduction(
1166     const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
1167   return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
1168 }
1169 
1170 bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
1171   return TTIImpl->isElementTypeLegalForScalableVector(Ty);
1172 }
1173 
1174 unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
1175                                                   unsigned LoadSize,
1176                                                   unsigned ChainSizeInBytes,
1177                                                   VectorType *VecTy) const {
1178   return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
1179 }
1180 
1181 unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
1182                                                    unsigned StoreSize,
1183                                                    unsigned ChainSizeInBytes,
1184                                                    VectorType *VecTy) const {
1185   return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
1186 }
1187 
1188 bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
1189                                                 ReductionFlags Flags) const {
1190   return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
1191 }
1192 
1193 bool TargetTransformInfo::preferPredicatedReductionSelect(
1194     unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
1195   return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
1196 }
1197 
1198 bool TargetTransformInfo::preferEpilogueVectorization() const {
1199   return TTIImpl->preferEpilogueVectorization();
1200 }
1201 
1202 TargetTransformInfo::VPLegalization
1203 TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
1204   return TTIImpl->getVPLegalizationStrategy(VPI);
1205 }
1206 
1207 bool TargetTransformInfo::hasArmWideBranch(bool Thumb) const {
1208   return TTIImpl->hasArmWideBranch(Thumb);
1209 }
1210 
1211 unsigned TargetTransformInfo::getMaxNumArgs() const {
1212   return TTIImpl->getMaxNumArgs();
1213 }
1214 
1215 bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
1216   return TTIImpl->shouldExpandReduction(II);
1217 }
1218 
1219 unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
1220   return TTIImpl->getGISelRematGlobalCost();
1221 }
1222 
1223 unsigned TargetTransformInfo::getMinTripCountTailFoldingThreshold() const {
1224   return TTIImpl->getMinTripCountTailFoldingThreshold();
1225 }
1226 
1227 bool TargetTransformInfo::supportsScalableVectors() const {
1228   return TTIImpl->supportsScalableVectors();
1229 }
1230 
1231 bool TargetTransformInfo::enableScalableVectorization() const {
1232   return TTIImpl->enableScalableVectorization();
1233 }
1234 
1235 bool TargetTransformInfo::hasActiveVectorLength(unsigned Opcode, Type *DataType,
1236                                                 Align Alignment) const {
1237   return TTIImpl->hasActiveVectorLength(Opcode, DataType, Alignment);
1238 }
1239 
// Out-of-line definition anchors the vtable of the type-erasure base class.
TargetTransformInfo::Concept::~Concept() = default;
1241 
// Default-constructed analysis falls back to the DataLayout-only TTI.
TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}
1243 
// Construct with a target-supplied callback that builds a per-function TTI.
TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}
1247 
1248 TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
1249                                                FunctionAnalysisManager &) {
1250   return TTICallback(F);
1251 }
1252 
// Unique key identifying this analysis to the new pass manager.
AnalysisKey TargetIRAnalysis::Key;
1254 
1255 TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
1256   return Result(F.getParent()->getDataLayout());
1257 }
1258 
// Register the basic pass with the legacy pass manager (analysis-only,
// hence the trailing 'true').
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;
1263 
// Out-of-line anchor pins the pass's vtable to this translation unit.
void TargetTransformInfoWrapperPass::anchor() {}
1265 
// Default constructor: TIRA is default-initialized, so getTTI() will produce
// the DataLayout-only fallback TTI.
TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}
1271 
// Construct around a specific TargetIRAnalysis, taking ownership of it.
TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}
1278 
1279 TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
1280   FunctionAnalysisManager DummyFAM;
1281   TTI = TIRA.run(F, DummyFAM);
1282   return *TTI;
1283 }
1284 
1285 ImmutablePass *
1286 llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
1287   return new TargetTransformInfoWrapperPass(std::move(TIRA));
1288 }
1289