//===- AArch64TargetTransformInfo.h - AArch64 specific TTI ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the AArch64 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
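///
/// Passes never use this class directly; they reach it through the generic
/// TargetTransformInfo wrapper. A minimal sketch of the usual factory hook,
/// assuming the common LLVM pattern (the real definition lives in
/// AArch64TargetMachine.cpp and may differ):
/// \code
///   TargetTransformInfo
///   AArch64TargetMachine::getTargetTransformInfo(const Function &F) {
///     // Wrap the target-specific implementation in the generic TTI facade.
///     return TargetTransformInfo(AArch64TTIImpl(this, F));
///   }
/// \endcode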
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H

#include "AArch64.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include <cstdint>

namespace llvm {

class APInt;
class Instruction;
class IntrinsicInst;
class Loop;
class SCEV;
class ScalarEvolution;
class Type;
class Value;
class VectorType;

class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
  using BaseT = BasicTTIImplBase<AArch64TTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const AArch64Subtarget *ST;
  const AArch64TargetLowering *TLI;

  const AArch64Subtarget *getST() const { return ST; }
  const AArch64TargetLowering *getTLI() const { return TLI; }

  enum MemIntrinsicType {
    VECTOR_LDST_TWO_ELEMENTS,
    VECTOR_LDST_THREE_ELEMENTS,
    VECTOR_LDST_FOUR_ELEMENTS
  };

  bool isWideningInstruction(Type *Ty, unsigned Opcode,
                             ArrayRef<const Value *> Args);

public:
  explicit AArch64TTIImpl(const AArch64TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// \name Scalar TTI Implementations
  /// @{

  using BaseT::getIntImmCost;
  int getIntImmCost(int64_t Val);
  int getIntImmCost(const APInt &Imm, Type *Ty);
  int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty);
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  bool enableInterleavedAccessVectorization() { return true; }

  unsigned getNumberOfRegisters(bool Vector) {
    if (Vector) {
      if (ST->hasNEON())
        return 32;
      return 0;
    }
    return 31;
  }

  unsigned getRegisterBitWidth(bool Vector) const {
    if (Vector) {
      if (ST->hasNEON())
        return 128;
      return 0;
    }
    return 64;
  }

  unsigned getMinVectorRegisterBitWidth() {
    return ST->getMinVectorRegisterBitWidth();
  }

  unsigned getMaxInterleaveFactor(unsigned VF);

  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr);

  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
                               unsigned Index);

  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);

  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>());

  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr);

  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         const Instruction *I = nullptr);

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;

  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace, const Instruction *I = nullptr);

  int getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType);

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);

  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace,
                                 bool UseMaskForCond = false,
                                 bool UseMaskForGaps = false);

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader);

  unsigned getCacheLineSize();

  unsigned getPrefetchDistance();

  unsigned getMinPrefetchStride();

  unsigned getMaxPrefetchIterationsAhead();

  bool shouldExpandReduction(const IntrinsicInst *II) const {
    return false;
  }

  unsigned getGISelRematGlobalCost() const {
    return 2;
  }

  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const;

  int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                 bool IsPairwiseForm);

  int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
  /// @}
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H