//===- AArch64TargetTransformInfo.h - AArch64 specific TTI ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the AArch64 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H

#include "AArch64.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include <cstdint>

namespace llvm {

class APInt;
class Instruction;
class IntrinsicInst;
class Loop;
class SCEV;
class ScalarEvolution;
class Type;
class Value;
class VectorType;

class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
  using BaseT = BasicTTIImplBase<AArch64TTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const AArch64Subtarget *ST;
  const AArch64TargetLowering *TLI;

  const AArch64Subtarget *getST() const { return ST; }
  const AArch64TargetLowering *getTLI() const { return TLI; }

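  // Classifies the NEON structured load/store intrinsics (ld2/st2, ld3/st3,
  // ld4/st4) by the number of interleaved vectors they transfer.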
  enum MemIntrinsicType {
    VECTOR_LDST_TWO_ELEMENTS,
    VECTOR_LDST_THREE_ELEMENTS,
    VECTOR_LDST_FOUR_ELEMENTS
  };

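  // True when \p Opcode applied to \p Ty with operands \p Args can be lowered
  // to an AArch64 widening instruction (e.g. uaddl/saddw), in which case the
  // implicit operand extensions are effectively free.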
  bool isWideningInstruction(Type *Ty, unsigned Opcode,
                             ArrayRef<const Value *> Args);

public:
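  // The subtarget is resolved per-function so that function-level target
  // attributes (e.g. "+sve") are honoured when answering TTI queries.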
  explicit AArch64TTIImpl(const AArch64TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// \name Scalar TTI Implementations
  /// @{

  using BaseT::getIntImmCost;
  InstructionCost getIntImmCost(int64_t Val);
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);
  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  bool enableInterleavedAccessVectorization() { return true; }

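  // ClassID 1 denotes the vector register class: AArch64 has 32 SIMD/FP
  // registers (V0-V31) when NEON is available. The scalar class has 31
  // allocatable general-purpose registers (X0-X30).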
  unsigned getNumberOfRegisters(unsigned ClassID) const {
    bool Vector = (ClassID == 1);
    if (Vector) {
      if (ST->hasNEON())
        return 32;
      return 0;
    }
    return 31;
  }

  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;

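  // Scalar registers are 64 bits wide. With SVE, fixed-width vectorization can
  // assume at least the architectural minimum vector length of 128 bits (or
  // the subtarget's configured minimum, if larger); scalable vectors are
  // measured in multiples of the 128-bit SVE granule.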
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    switch (K) {
    case TargetTransformInfo::RGK_Scalar:
      return TypeSize::getFixed(64);
    case TargetTransformInfo::RGK_FixedWidthVector:
      if (ST->hasSVE())
        return TypeSize::getFixed(
            std::max(ST->getMinSVEVectorSizeInBits(), 128u));
      return TypeSize::getFixed(ST->hasNEON() ? 128 : 0);
    case TargetTransformInfo::RGK_ScalableVector:
      return TypeSize::getScalable(ST->hasSVE() ? 128 : 0);
    }
    llvm_unreachable("Unsupported register kind");
  }

  unsigned getMinVectorRegisterBitWidth() const {
    return ST->getMinVectorRegisterBitWidth();
  }

  Optional<unsigned> getVScaleForTuning() const {
    return ST->getVScaleForTuning();
  }

  /// Return an estimated cost factor that can be used as a multiplier when
  /// scalarizing an operation for a vector with ElementCount \p VF. For
  /// scalable vectors this scales the known minimum element count by the
  /// subtarget's vscale-for-tuning estimate, which may differ from the value
  /// of vscale actually seen at run time.
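  /// For example, for \p VF = <vscale x 4> with a vscale-for-tuning of 2,
  /// this returns 4 * 2 = 8.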
  unsigned getMaxNumElements(ElementCount VF) const {
    if (!VF.isScalable())
      return VF.getFixedValue();

    return VF.getKnownMinValue() * ST->getVScaleForTuning();
  }

  unsigned getMaxInterleaveFactor(unsigned VF);

  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr);

  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);

  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy, unsigned Index);

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);

  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     unsigned Index);

  InstructionCost getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                         bool IsUnsigned,
                                         TTI::TargetCostKind CostKind);

  InstructionCost getArithmeticReductionCostSVE(unsigned Opcode,
                                                VectorType *ValTy,
                                                TTI::TargetCostKind CostKind);

  InstructionCost getSpliceCost(VectorType *Tp, int Index);

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);

  InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                            const SCEV *Ptr);

  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr);

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool useNeonVector(const Type *Ty) const;

  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src,
                                  MaybeAlign Alignment, unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind,
                                  const Instruction *I = nullptr);

  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType);

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);

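  // SVE loads and stores natively support pointers, 8/16/32/64-bit integers,
  // half/float/double, and (with the +bf16 extension) bfloat element types.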
  bool isElementTypeLegalForScalableVector(Type *Ty) const {
    if (Ty->isPointerTy())
      return true;

    if (Ty->isBFloatTy() && ST->hasBF16())
      return true;

    if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
      return true;

    if (Ty->isIntegerTy(8) || Ty->isIntegerTy(16) ||
        Ty->isIntegerTy(32) || Ty->isIntegerTy(64))
      return true;

    return false;
  }

  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
    if (!ST->hasSVE())
      return false;

    // A masked load/store on a fixed vector is only legal when SVE is used
    // for fixed-length vectors; otherwise fall back to scalarization of the
    // masked operation.
    if (isa<FixedVectorType>(DataType) && !ST->useSVEForFixedLengthVectors())
      return false;

    return isElementTypeLegalForScalableVector(DataType->getScalarType());
  }

  bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }

  bool isLegalMaskedStore(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }

  bool isLegalMaskedGatherScatter(Type *DataType) const {
    if (!ST->hasSVE())
      return false;

    // For fixed vectors, scalarize if not using SVE for them, or if there are
    // fewer than two elements (a single-element gather/scatter is effectively
    // an ordinary scalar memory access).
    auto *DataTypeFVTy = dyn_cast<FixedVectorType>(DataType);
    if (DataTypeFVTy && (!ST->useSVEForFixedLengthVectors() ||
                         DataTypeFVTy->getNumElements() < 2))
      return false;

    return isElementTypeLegalForScalableVector(DataType->getScalarType());
  }

  bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
    return isLegalMaskedGatherScatter(DataType);
  }
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
    return isLegalMaskedGatherScatter(DataType);
  }

  bool isLegalNTStore(Type *DataType, Align Alignment) {
    // NOTE: The logic below is mostly geared towards LV, which calls it with
    //       vectors with 2 elements. We might want to improve that, if other
    //       users show up.
    // Nontemporal vector stores can be directly lowered to STNP, if the vector
    // can be halved so that each half fits into a register. That's the case if
    // the element type fits into a register and the number of elements is a
    // power of 2 > 1.
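    // For example, a <4 x i32> nontemporal store can be split into two
    // <2 x i32> halves, each of which fits in a 64-bit register, and lowered
    // to a single STNP of two D registers.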
    if (auto *DataTypeVTy = dyn_cast<VectorType>(DataType)) {
      unsigned NumElements =
          cast<FixedVectorType>(DataTypeVTy)->getNumElements();
      unsigned EltSize = DataTypeVTy->getElementType()->getScalarSizeInBits();
      return NumElements > 1 && isPowerOf2_64(NumElements) && EltSize >= 8 &&
             EltSize <= 128 && isPowerOf2_64(EltSize);
    }
    return BaseT::isLegalNTStore(DataType, Alignment);
  }

  bool enableOrderedReductions() const { return true; }

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader);

  bool shouldExpandReduction(const IntrinsicInst *II) const { return false; }

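  // Used by GlobalISel when weighing rematerialization of a global's address
  // against keeping it live; materializing a global address on AArch64
  // typically takes two instructions (e.g. ADRP + ADD).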
  unsigned getGISelRematGlobalCost() const {
    return 2;
  }

  bool supportsScalableVectors() const { return ST->hasSVE(); }

  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const;

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             Optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask, int Index,
                                 VectorType *SubTp);
  /// @}
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H