//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the X86 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
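/// A minimal, illustrative sketch of how a pass typically reaches this
/// implementation (indirectly, through the TargetTransformInfo wrapper; TTI,
/// F and VecTy below are placeholders, not names from this file):
///
/// \code
///   const TargetTransformInfo &TTI =
///       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
///   if (TTI.isLegalMaskedGather(VecTy, Align(4)))
///     ... // the vectorizer may emit a masked gather here
/// \endcode
///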
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"

namespace llvm {

class InstCombiner;

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

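  /// Subtarget features that are ignored when checking whether a callee's
  /// feature set is compatible with its caller's for inlining purposes (see
  /// areInlineCompatible); these bits affect tuning or codegen preferences
  /// rather than the ABI or the set of available intrinsics.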
  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates that the CPU is 64-bit capable, not that we are in
      // 64-bit mode.
      X86::Feature64Bit,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCMPXCHG16B,
      X86::FeatureLAHFSAHF,

      // Codegen control options.
      X86::FeatureFast11ByteNOP,
      X86::FeatureFast15ByteNOP,
      X86::FeatureFastBEXTR,
      X86::FeatureFastHorizontalOps,
      X86::FeatureFastLZCNT,
      X86::FeatureFastScalarFSQRT,
      X86::FeatureFastSHLDRotate,
      X86::FeatureFastScalarShiftMasks,
      X86::FeatureFastVectorShiftMasks,
      X86::FeatureFastVariableShuffle,
      X86::FeatureFastVectorFSQRT,
      X86::FeatureLEAForSP,
      X86::FeatureLEAUsesAG,
      X86::FeatureLZCNTFalseDeps,
      X86::FeatureBranchFusion,
      X86::FeatureMacroFusion,
      X86::FeaturePadShortFunctions,
      X86::FeaturePOPCNTFalseDeps,
      X86::FeatureSSEUnalignedMem,
      X86::FeatureSlow3OpsLEA,
      X86::FeatureSlowDivide32,
      X86::FeatureSlowDivide64,
      X86::FeatureSlowIncDec,
      X86::FeatureSlowLEA,
      X86::FeatureSlowPMADDWD,
      X86::FeatureSlowPMULLD,
      X86::FeatureSlowSHLD,
      X86::FeatureSlowTwoMemOps,
      X86::FeatureSlowUAMem16,
      X86::FeaturePreferMaskRegisters,
      X86::FeatureInsertVZEROUPPER,
      X86::FeatureUseGLMDivSqrtCosts,

      // Perf-tuning flags.
      X86::FeatureHasFastGather,
      X86::FeatureSlowUAMem32,

      // Based on whether the user set the -mprefer-vector-width command-line
      // option.
      X86::FeaturePrefer128Bit,
      X86::FeaturePrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom,
      X86::ProcIntelSLM,
  };

public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
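  /// Return the kind of popcount support (software expansion vs. hardware
  /// POPCNT) available for a value of the given bit width.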
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  llvm::Optional<unsigned> getCacheSize(
    TargetTransformInfo::CacheLevel Level) const override;
  llvm::Optional<unsigned> getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const override;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

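  // Per-operation cost queries used by the vectorizers, unroller and inliner;
  // the returned costs are interpreted according to CostKind (reciprocal
  // throughput, latency, code size, or size-and-latency).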
  unsigned getNumberOfRegisters(unsigned ClassID) const;
  unsigned getRegisterBitWidth(bool Vector) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(unsigned VF);
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
  int getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, int Index,
                     VectorType *SubTp);
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
                       const Instruction *I = nullptr);
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         CmpInst::Predicate VecPred,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr);
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
  unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
                                    bool Insert, bool Extract);
  int getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                      unsigned AddressSpace,
                      TTI::TargetCostKind CostKind,
                      const Instruction *I = nullptr);
  int getMaskedMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency);
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                             bool VariableMask, Align Alignment,
                             TTI::TargetCostKind CostKind,
                             const Instruction *I);
  int getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                const SCEV *Ptr);

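  // Hooks used by InstCombine to fold or simplify calls to X86-specific
  // intrinsics.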
  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;
  Optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
  Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;

  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  int getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                     TTI::TargetCostKind CostKind);
  int getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                            TTI::TargetCostKind CostKind);

  int getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                 bool IsPairwiseForm,
                                 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency);

  int getMinMaxCost(Type *Ty, Type *CondTy, bool IsUnsigned);

  int getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                             bool IsPairwiseForm, bool IsUnsigned,
                             TTI::TargetCostKind CostKind);

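  // Costs for interleaved load/store groups (Factor adjacent elements per
  // access); the AVX512 and AVX2 variants are subtarget-specific cost models
  // used when the corresponding ISA extensions are available.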
  int getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  int getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  int getInterleavedMemoryOpCostAVX2(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);

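  /// Cost of materializing a 64-bit integer constant as an immediate operand.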
  int getIntImmCost(int64_t);

  int getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind);

  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);

  int getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
                        Type *Ty, TTI::TargetCostKind CostKind,
                        Instruction *Inst = nullptr);
  int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                          Type *Ty, TTI::TargetCostKind CostKind);
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedStore(Type *DataType, Align Alignment);
  bool isLegalNTLoad(Type *DataType, Align Alignment);
  bool isLegalNTStore(Type *DataType, Align Alignment);
  bool isLegalMaskedGather(Type *DataType, Align Alignment);
  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedExpandLoad(Type *DataType);
  bool isLegalMaskedCompressStore(Type *DataType);
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areFunctionArgsABICompatible(const Function *Caller,
                                    const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const;
  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool enableInterleavedAccessVectorization();

  /// Allow vectorizers to form reduction intrinsics in IR. The IR is expanded
  /// into shuffles and vector math/logic by the backend
  /// (see TTI::shouldExpandReduction).
  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const {
    return true;
  }
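  // Illustrative only (not code from this file): with the hook above returning
  // true, a vectorized loop may finish a sum reduction with an intrinsic such
  // as
  //   %r = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %vec)
  // which the X86 backend then lowers to a shuffle-and-add tree. The exact
  // intrinsic spelling depends on the LLVM version (older releases used the
  // llvm.experimental.vector.reduce.* names).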

private:
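  // Helpers for getGatherScatterOpCost: estimate the cost of a scalarized
  // (emulated) gather/scatter versus one using the hardware gather/scatter
  // instructions.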
  int getGSScalarCost(unsigned Opcode, Type *DataTy, bool VariableMask,
                      Align Alignment, unsigned AddressSpace);
  int getGSVectorCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                      Align Alignment, unsigned AddressSpace);

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif