//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file describes a TargetTransformInfo::Concept conforming object
/// specific to the X86 target machine. It uses the target's detailed
/// information to provide more precise answers to certain TTI queries, while
/// letting the target independent and default TTI implementations handle the
/// rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include <optional>

namespace llvm {

class InstCombiner;

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64-bit capable, not that we are in 64-bit
      // mode.
      X86::FeatureX86_64,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCX16,
      X86::FeatureLAHFSAHF64,

      // Some older targets can be set up to fold unaligned loads.
      X86::FeatureSSEUnalignedMem,

      // Codegen control options.
      X86::TuningFast11ByteNOP,
      X86::TuningFast15ByteNOP,
      X86::TuningFastBEXTR,
      X86::TuningFastHorizontalOps,
      X86::TuningFastLZCNT,
      X86::TuningFastScalarFSQRT,
      X86::TuningFastSHLDRotate,
      X86::TuningFastScalarShiftMasks,
      X86::TuningFastVectorShiftMasks,
      X86::TuningFastVariableCrossLaneShuffle,
      X86::TuningFastVariablePerLaneShuffle,
      X86::TuningFastVectorFSQRT,
      X86::TuningLEAForSP,
      X86::TuningLEAUsesAG,
      X86::TuningLZCNTFalseDeps,
      X86::TuningBranchFusion,
      X86::TuningMacroFusion,
      X86::TuningPadShortFunctions,
      X86::TuningPOPCNTFalseDeps,
      X86::TuningMULCFalseDeps,
      X86::TuningPERMFalseDeps,
      X86::TuningRANGEFalseDeps,
      X86::TuningGETMANTFalseDeps,
      X86::TuningMULLQFalseDeps,
      X86::TuningSlow3OpsLEA,
      X86::TuningSlowDivide32,
      X86::TuningSlowDivide64,
      X86::TuningSlowIncDec,
      X86::TuningSlowLEA,
      X86::TuningSlowPMADDWD,
      X86::TuningSlowPMULLD,
      X86::TuningSlowSHLD,
      X86::TuningSlowTwoMemOps,
      X86::TuningSlowUAMem16,
      X86::TuningPreferMaskRegisters,
      X86::TuningInsertVZEROUPPER,
      X86::TuningUseSLMArithCosts,
      X86::TuningUseGLMDivSqrtCosts,
      X86::TuningNoDomainDelay,
      X86::TuningNoDomainDelayMov,
      X86::TuningNoDomainDelayShuffle,
      X86::TuningNoDomainDelayBlend,
      X86::TuningPreferShiftShuffle,
      X86::TuningFastImmVectorShift,

      // Perf-tuning flags.
      X86::TuningFastGather,
      X86::TuningSlowUAMem32,
      X86::TuningAllowLight256Bit,

      // Based on whether the user set the -mprefer-vector-width command line
      // option.
      X86::TuningPrefer128Bit,
      X86::TuningPrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom
  };
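  // Illustrative sketch, not part of the original header: the list above is
  // consumed by areInlineCompatible() (declared below). Roughly, a call site
  // may be inlined when the callee's subtarget features, minus the ignorable
  // bits, are a subset of the caller's. Conceptually (CallerST, CalleeST, and
  // the locals are hypothetical names for exposition; see
  // X86TargetTransformInfo.cpp for the actual logic):
  //
  //   FeatureBitset CallerBits =
  //       CallerST->getFeatureBits() & ~InlineFeatureIgnoreList;
  //   FeatureBitset CalleeBits =
  //       CalleeST->getFeatureBits() & ~InlineFeatureIgnoreList;
  //   bool MayInline = (CallerBits & CalleeBits) == CalleeBits;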
public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  std::optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const override;
  std::optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const override;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const;
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(ElementCount VF);
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = std::nullopt);
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);
  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                     Type *CondTy, CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr);
  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0, Value *Op1);
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind);
  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                            int VF,
                                            const APInt &DemandedDstElts,
                                            TTI::TargetCostKind CostKind);
  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue,
                                                  TTI::OP_None},
                  const Instruction *I = nullptr);
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind);
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);
  InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
                                       const Value *Base,
                                       const TTI::PointersChainInfo &Info,
                                       Type *AccessTy,
                                       TTI::TargetCostKind CostKind);
  InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                            const SCEV *Ptr);

  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const;
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
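  // Illustrative sketch, not part of the original header: InstCombine calls
  // these hooks so the target can fold X86-specific intrinsics. For example,
  // a blendv whose mask is a known constant can be rewritten as a plain IR
  // select on the mask's sign bits, conceptually:
  //
  //   %r = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a,
  //                                                  <4 x float> %b,
  //                                                  <4 x float> %constmask)
  //   ; may be simplified to a select/shuffle with no intrinsic call.
  //
  // The actual folds live in X86InstCombineIntrinsic.cpp; this comment only
  // sketches the intent.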
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts,
      APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;

  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);

  InstructionCost getMinMaxCost(Intrinsic::ID IID, Type *Ty,
                                TTI::TargetCostKind CostKind,
                                FastMathFlags FMF);

  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                         FastMathFlags FMF,
                                         TTI::TargetCostKind CostKind);

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false);
  InstructionCost getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false);

  InstructionCost getIntImmCost(int64_t);

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);
  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) const;
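  // Worked example, illustrative only: x86 addressing modes support index
  // scales of 1, 2, 4, and 8, as in `movl 12(%rbx,%rsi,4), %eax`, i.e.
  // base + index*4 + 12. A query for that mode might look like (`TTI` and
  // `Int32Ty` are hypothetical locals for exposition):
  //
  //   InstructionCost C = TTI.getScalingFactorCost(
  //       Int32Ty, /*BaseGV=*/nullptr, /*BaseOffset=*/12, /*HasBaseReg=*/true,
  //       /*Scale=*/4, /*AddrSpace=*/0);
  //
  // Per the contract above, C is non-negative when the mode is supported and
  // negative otherwise.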
  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedStore(Type *DataType, Align Alignment);
  bool isLegalNTLoad(Type *DataType, Align Alignment);
  bool isLegalNTStore(Type *DataType, Align Alignment);
  bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;
  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment);
  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
    return forceScalarizeMaskedGather(VTy, Alignment);
  }
  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedGather(Type *DataType, Align Alignment);
  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedExpandLoad(Type *DataType);
  bool isLegalMaskedCompressStore(Type *DataType);
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const;
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isExpensiveToSpeculativelyExecute(const Instruction *I);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Types) const;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const {
    return ST->getMaxInlineSizeThreshold();
  }

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool prefersVectorizedAddressing() const;
  bool supportsEfficientVectorElementLoadStore() const;
  bool enableInterleavedAccessVectorization();

private:
  bool supportsGather() const;
  InstructionCost getGSScalarCost(unsigned Opcode, Type *DataTy,
                                  bool VariableMask, Align Alignment,
                                  unsigned AddressSpace);
  InstructionCost getGSVectorCost(unsigned Opcode, Type *DataTy,
                                  const Value *Ptr, Align Alignment,
                                  unsigned AddressSpace);

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif