//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the X86 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"

namespace llvm {

class InstCombiner;

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64-bit capable, not that we are in 64-bit
      // mode.
      X86::Feature64Bit,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCMPXCHG16B,
      X86::FeatureLAHFSAHF,

      // Codegen control options.
      X86::FeatureFast11ByteNOP,
      X86::FeatureFast15ByteNOP,
      X86::FeatureFastBEXTR,
      X86::FeatureFastHorizontalOps,
      X86::FeatureFastLZCNT,
      X86::FeatureFastScalarFSQRT,
      X86::FeatureFastSHLDRotate,
      X86::FeatureFastScalarShiftMasks,
      X86::FeatureFastVectorShiftMasks,
      X86::FeatureFastVariableCrossLaneShuffle,
      X86::FeatureFastVariablePerLaneShuffle,
      X86::FeatureFastVectorFSQRT,
      X86::FeatureLEAForSP,
      X86::FeatureLEAUsesAG,
      X86::FeatureLZCNTFalseDeps,
      X86::FeatureBranchFusion,
      X86::FeatureMacroFusion,
      X86::FeaturePadShortFunctions,
      X86::FeaturePOPCNTFalseDeps,
      X86::FeatureSSEUnalignedMem,
      X86::FeatureSlow3OpsLEA,
      X86::FeatureSlowDivide32,
      X86::FeatureSlowDivide64,
      X86::FeatureSlowIncDec,
      X86::FeatureSlowLEA,
      X86::FeatureSlowPMADDWD,
      X86::FeatureSlowPMULLD,
      X86::FeatureSlowSHLD,
      X86::FeatureSlowTwoMemOps,
      X86::FeatureSlowUAMem16,
      X86::FeaturePreferMaskRegisters,
      X86::FeatureInsertVZEROUPPER,
      X86::FeatureUseGLMDivSqrtCosts,

      // Perf-tuning flags.
      X86::FeatureHasFastGather,
      X86::FeatureSlowUAMem32,

      // Based on whether the user set the -mprefer-vector-width command line
      // option.
      X86::FeaturePrefer128Bit,
      X86::FeaturePrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom,
      X86::ProcIntelSLM,
  };

public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  llvm::Optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const override;
  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const override;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const;
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(unsigned VF);
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask, int Index,
                                 VectorType *SubTp);
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);
  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr);
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     unsigned Index);
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract);
  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src,
                                  MaybeAlign Alignment, unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind,
                                  const Instruction *I = nullptr);
  InstructionCost
  getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                        unsigned AddressSpace,
                        TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency);
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);
  InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                            const SCEV *Ptr);

  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;
  Optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
  Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;

  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  InstructionCost
  getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                 TTI::TargetCostKind CostKind);
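
  // Illustrative only: clients normally reach the intrinsic cost hook below
  // through the generic TargetTransformInfo wrapper rather than calling
  // X86TTIImpl directly. Assuming placeholder types RetTy/ArgTy and a TTI
  // reference obtained from TargetIRAnalysis, such a query might look like:
  //
  //   IntrinsicCostAttributes Attrs(Intrinsic::ctpop, RetTy, {ArgTy});
  //   InstructionCost Cost =
  //       TTI.getIntrinsicInstrCost(Attrs, TTI::TCK_RecipThroughput);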
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getArithmeticReductionCost(
      unsigned Opcode, VectorType *Ty, Optional<FastMathFlags> FMF,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency);

  InstructionCost getMinMaxCost(Type *Ty, Type *CondTy, bool IsUnsigned);

  InstructionCost getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                         bool IsUnsigned,
                                         TTI::TargetCostKind CostKind);

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  InstructionCost getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  InstructionCost getInterleavedMemoryOpCostAVX2(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);

  InstructionCost getIntImmCost(int64_t);

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedStore(Type *DataType, Align Alignment);
  bool isLegalNTLoad(Type *DataType, Align Alignment);
  bool isLegalNTStore(Type *DataType, Align Alignment);
  bool isLegalMaskedGather(Type *DataType, Align Alignment);
  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedExpandLoad(Type *DataType);
  bool isLegalMaskedCompressStore(Type *DataType);
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areFunctionArgsABICompatible(const Function *Caller,
                                    const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const;
  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool enableInterleavedAccessVectorization();

private:
  InstructionCost getGSScalarCost(unsigned Opcode, Type *DataTy,
                                  bool VariableMask, Align Alignment,
                                  unsigned AddressSpace);
  InstructionCost getGSVectorCost(unsigned Opcode, Type *DataTy,
                                  const Value *Ptr, Align Alignment,
                                  unsigned AddressSpace);

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif
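
// A minimal usage sketch, illustrative rather than part of this interface:
// assuming a new-pass-manager function pass with a FunctionAnalysisManager
// FAM and a vector type VecTy, cost queries go through the generic
// TargetTransformInfo analysis, which forwards to the X86TTIImpl declared
// above when targeting X86:
//
//   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
//   InstructionCost AddCost =
//       TTI.getArithmeticInstrCost(Instruction::Add, VecTy);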