//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the X86 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
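
// Illustrative note (not part of this header): clients normally reach this
// implementation through the generic TargetTransformInfo wrapper rather than
// by constructing X86TTIImpl directly, e.g. via the analysis framework:
//
//   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
//   InstructionCost Cost = TTI.getArithmeticInstrCost(
//       Instruction::Add, FixedVectorType::get(Type::getInt32Ty(Ctx), 8),
//       TargetTransformInfo::TCK_RecipThroughput);
//
// FAM (a FunctionAnalysisManager) and Ctx (an LLVMContext) are assumed names
// for this sketch.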

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"

namespace llvm {

class InstCombiner;

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

  // Features that do not affect whether a callee can safely be inlined into
  // a caller; they are ignored when comparing caller/callee feature sets.
  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64-bit capable, not that we are in 64-bit
      // mode.
      X86::Feature64Bit,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCMPXCHG16B,
      X86::FeatureLAHFSAHF,

      // Some older targets can be set up to fold unaligned loads.
      X86::FeatureSSEUnalignedMem,

      // Codegen control options.
      X86::TuningFast11ByteNOP,
      X86::TuningFast15ByteNOP,
      X86::TuningFastBEXTR,
      X86::TuningFastHorizontalOps,
      X86::TuningFastLZCNT,
      X86::TuningFastScalarFSQRT,
      X86::TuningFastSHLDRotate,
      X86::TuningFastScalarShiftMasks,
      X86::TuningFastVectorShiftMasks,
      X86::TuningFastVariableCrossLaneShuffle,
      X86::TuningFastVariablePerLaneShuffle,
      X86::TuningFastVectorFSQRT,
      X86::TuningLEAForSP,
      X86::TuningLEAUsesAG,
      X86::TuningLZCNTFalseDeps,
      X86::TuningBranchFusion,
      X86::TuningMacroFusion,
      X86::TuningPadShortFunctions,
      X86::TuningPOPCNTFalseDeps,
      X86::TuningSlow3OpsLEA,
      X86::TuningSlowDivide32,
      X86::TuningSlowDivide64,
      X86::TuningSlowIncDec,
      X86::TuningSlowLEA,
      X86::TuningSlowPMADDWD,
      X86::TuningSlowPMULLD,
      X86::TuningSlowSHLD,
      X86::TuningSlowTwoMemOps,
      X86::TuningSlowUAMem16,
      X86::TuningPreferMaskRegisters,
      X86::TuningInsertVZEROUPPER,
      X86::TuningUseSLMArithCosts,
      X86::TuningUseGLMDivSqrtCosts,

      // Perf-tuning flags.
      X86::TuningFastGather,
      X86::TuningSlowUAMem32,

      // Based on whether the user set the -mprefer-vector-width command-line
      // option.
      X86::TuningPrefer128Bit,
      X86::TuningPrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom
  };
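
  // Sketch (an illustration, not part of the declaration above): a list like
  // this is typically applied by masking the ignored features out of both
  // feature sets before the inline-compatibility subset test, e.g.:
  //
  //   FeatureBitset CallerBits =
  //       CallerST->getFeatureBits() & ~InlineFeatureIgnoreList;
  //   FeatureBitset CalleeBits =
  //       CalleeST->getFeatureBits() & ~InlineFeatureIgnoreList;
  //   bool Compatible = (CallerBits & CalleeBits) == CalleeBits;
  //
  // CallerST/CalleeST stand in for the caller's and callee's X86Subtarget;
  // see areInlineCompatible below for the real entry point.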

public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  llvm::Optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const override;
  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const override;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const;
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(unsigned VF);
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask, int Index,
                                 VectorType *SubTp);
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);
  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr);
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     unsigned Index);
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract);
  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                            int VF,
                                            const APInt &DemandedDstElts,
                                            TTI::TargetCostKind CostKind);
  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src,
                                  MaybeAlign Alignment, unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind,
                                  const Instruction *I = nullptr);
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind);
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);
  InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                            const SCEV *Ptr);

  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;
  Optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
  Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;

  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  InstructionCost
  getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                 TTI::TargetCostKind CostKind);
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             Optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);

  InstructionCost getMinMaxCost(Type *Ty, Type *CondTy, bool IsUnsigned);

  InstructionCost getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                         bool IsUnsigned,
                                         TTI::TargetCostKind CostKind);

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  InstructionCost getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false);

  InstructionCost getIntImmCost(int64_t);

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedStore(Type *DataType, Align Alignment);
  bool isLegalNTLoad(Type *DataType, Align Alignment);
  bool isLegalNTStore(Type *DataType, Align Alignment);
  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment);
  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
    // Scatter scalarization is forced under the same conditions as gather.
    return forceScalarizeMaskedGather(VTy, Alignment);
  }
  bool isLegalMaskedGather(Type *DataType, Align Alignment);
  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedExpandLoad(Type *DataType);
  bool isLegalMaskedCompressStore(Type *DataType);
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Types) const;
  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool prefersVectorizedAddressing() const;
  bool supportsEfficientVectorElementLoadStore() const;
  bool enableInterleavedAccessVectorization();

private:
  bool supportsGather() const;
  InstructionCost getGSScalarCost(unsigned Opcode, Type *DataTy,
                                  bool VariableMask, Align Alignment,
                                  unsigned AddressSpace);
  InstructionCost getGSVectorCost(unsigned Opcode, Type *DataTy,
                                  const Value *Ptr, Align Alignment,
                                  unsigned AddressSpace);

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif
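
// Usage sketch (illustrative; TTI and Ctx are assumed names): legality
// queries such as "may the vectorizer emit a masked gather for <8 x float>
// at 4-byte alignment?" funnel through the TargetTransformInfo wrapper into
// the hooks declared above:
//
//   bool UseGather = TTI.isLegalMaskedGather(
//       FixedVectorType::get(Type::getFloatTy(Ctx), 8), Align(4));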