//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the RISC-V target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
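//
// Illustrative use (not part of this header): passes reach these hooks
// through the generic TargetTransformInfo wrapper, for example:
//
//   TargetTransformInfo TTI = TM->getTargetTransformInfo(F);
//   if (TTI.supportsScalableVectors())
//     ...; // consider vscale-based vectorization factors
//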

#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H

#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include <optional>

namespace llvm {

class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
  using BaseT = BasicTTIImplBase<RISCVTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const RISCVSubtarget *ST;
  const RISCVTargetLowering *TLI;

  const RISCVSubtarget *getST() const { return ST; }
  const RISCVTargetLowering *getTLI() const { return TLI; }

  /// This function returns an estimate for VL to be used in VL-based terms
  /// of the cost model.  For fixed-length vectors, this is simply the
  /// vector length.  For scalable vectors, we return results consistent
  /// with getVScaleForTuning under the assumption that clients are also
  /// using that when comparing costs between scalar and vector representations.
  /// This does unfortunately mean that we can both undershoot and overshoot
  /// the true cost significantly if getVScaleForTuning is wildly off for the
  /// actual target hardware.
  unsigned getEstimatedVLFor(VectorType *Ty);
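  // Example values (illustrative, not the implementation): for a fixed
  // <8 x i32> the estimate is 8; for a scalable <vscale x 4 x i32> with
  // getVScaleForTuning() == 2, the estimate is 4 * 2 = 8.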

  /// Return the cost of LMUL. The larger the LMUL, the higher the cost.
  InstructionCost getLMULCost(MVT VT);
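  // LMUL is RVV's register-group multiplier: an operation at LMUL=8 operates
  // on a group of eight architectural vector registers, so it is modeled as
  // proportionally more expensive than one at LMUL=1.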

  /// Return the cost of accessing a constant pool entry of the specified
  /// type.
  InstructionCost getConstantPoolLoadCost(Type *Ty,
                                          TTI::TargetCostKind CostKind);
public:
  explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// Return the cost of materializing an immediate for a value operand of
  /// a store instruction.
  InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
                                  TTI::TargetCostKind CostKind);

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);
  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);

  TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  bool shouldExpandReduction(const IntrinsicInst *II) const;
  bool supportsScalableVectors() const { return ST->hasVInstructions(); }
  bool enableOrderedReductions() const { return true; }
  bool enableScalableVectorization() const { return ST->hasVInstructions(); }
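  // With V instructions available, fold the loop tail by masking data
  // operations with an active-lane mask (TailFoldingStyle::Data); otherwise
  // prefer a mask computed without llvm.get.active.lane.mask.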
  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const {
    return ST->hasVInstructions() ? TailFoldingStyle::Data
                                  : TailFoldingStyle::DataWithoutLaneMask;
  }
  std::optional<unsigned> getMaxVScale() const;
  std::optional<unsigned> getVScaleForTuning() const;

  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;

  unsigned getRegUsageForType(Type *Ty);

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;

  bool preferEpilogueVectorization() const {
    // Epilogue vectorization is usually unprofitable - tail folding or
    // a smaller VF would have been better.  This is a blunt hammer - we
    // should re-examine this once vectorization is better tuned.
    return false;
  }

  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
                                       const Value *Base,
                                       const TTI::PointersChainInfo &Info,
                                       Type *AccessTy,
                                       TTI::TargetCostKind CostKind);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

  unsigned getMinVectorRegisterBitWidth() const {
    return ST->useRVVForFixedLengthVectors() ? 16 : 0;
  }

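  /// Return the cost of a vrgather.vv (vector register gather) for type VT.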
  InstructionCost getVRGatherVVCost(MVT VT);

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = std::nullopt);

  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);

  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);

  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);

  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                         FastMathFlags FMF,
                                         TTI::TargetCostKind CostKind);

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);

  InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                           Type *ResTy, VectorType *ValTy,
                                           FastMathFlags FMF,
                                           TTI::TargetCostKind CostKind);

  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None},
                  const Instruction *I = nullptr);

  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr);

  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0, Value *Op1);

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);

  bool isElementTypeLegalForScalableVector(Type *Ty) const {
    return TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty));
  }

  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
    if (!ST->hasVInstructions())
      return false;

    EVT DataTypeVT = TLI->getValueType(DL, DataType);

    // Only support fixed vectors if we know the minimum vector size.
    if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
      return false;

    EVT ElemType = DataTypeVT.getScalarType();
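    // Unless the subtarget allows misaligned vector memory accesses, require
    // the access to be at least naturally aligned for the element type.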
    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
      return false;

    return TLI->isLegalElementTypeForRVV(ElemType);
  }

  bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }
  bool isLegalMaskedStore(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }

  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) {
    if (!ST->hasVInstructions())
      return false;

    EVT DataTypeVT = TLI->getValueType(DL, DataType);

    // Only support fixed vectors if we know the minimum vector size.
    if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
      return false;

    EVT ElemType = DataTypeVT.getScalarType();
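    // As above, require natural element alignment unless misaligned vector
    // accesses are enabled.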
    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
      return false;

    return TLI->isLegalElementTypeForRVV(ElemType);
  }

  bool isLegalMaskedGather(Type *DataType, Align Alignment) {
    return isLegalMaskedGatherScatter(DataType, Alignment);
  }
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
    return isLegalMaskedGatherScatter(DataType, Alignment);
  }

  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
    // Scalarize masked gather for RV64 if EEW=64 indices aren't supported.
    return ST->is64Bit() && !ST->hasVInstructionsI64();
  }

  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
    // Scalarize masked scatter for RV64 if EEW=64 indices aren't supported.
    return ST->is64Bit() && !ST->hasVInstructionsI64();
  }

  bool isVScaleKnownToBeAPowerOfTwo() const {
    return TLI->isVScaleKnownToBeAPowerOfTwo();
  }

  /// \returns How the target needs this vector-predicated operation to be
  /// transformed.
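  /// Note: RVV has no vector multiply-reduction instruction, so a non-i1
  /// llvm.vp.reduce.mul is discarded and converted to unpredicated IR below;
  /// all other VP intrinsics are reported legal once V instructions exist.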
  TargetTransformInfo::VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const {
    using VPLegalization = TargetTransformInfo::VPLegalization;
    if (!ST->hasVInstructions() ||
        (PI.getIntrinsicID() == Intrinsic::vp_reduce_mul &&
         cast<VectorType>(PI.getArgOperand(1)->getType())
                 ->getElementType()
                 ->getIntegerBitWidth() != 1))
      return VPLegalization(VPLegalization::Discard, VPLegalization::Convert);
    return VPLegalization(VPLegalization::Legal, VPLegalization::Legal);
  }

  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const {
    if (!VF.isScalable())
      return true;

    Type *Ty = RdxDesc.getRecurrenceType();
    if (!TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty)))
      return false;

    switch (RdxDesc.getRecurrenceKind()) {
    case RecurKind::Add:
    case RecurKind::FAdd:
    case RecurKind::And:
    case RecurKind::Or:
    case RecurKind::Xor:
    case RecurKind::SMin:
    case RecurKind::SMax:
    case RecurKind::UMin:
    case RecurKind::UMax:
    case RecurKind::FMin:
    case RecurKind::FMax:
    case RecurKind::SelectICmp:
    case RecurKind::SelectFCmp:
    case RecurKind::FMulAdd:
      return true;
    default:
      return false;
    }
  }

  unsigned getMaxInterleaveFactor(ElementCount VF) {
    // Don't interleave if the loop has been vectorized with scalable vectors.
    if (VF.isScalable())
      return 1;
    // If the loop will not be vectorized, don't interleave it either;
    // let the regular unroller handle it instead.
    return VF.isScalar() ? 1 : ST->getMaxInterleaveFactor();
  }

  bool enableInterleavedAccessVectorization() { return true; }

  enum RISCVRegisterClass { GPRRC, FPRRC, VRRC };
  unsigned getNumberOfRegisters(unsigned ClassID) const {
    switch (ClassID) {
    case RISCVRegisterClass::GPRRC:
      // 31 = 32 GPRs - x0 (the zero register)
      // FIXME: Should we exclude fixed registers like SP, TP or GP?
      return 31;
    case RISCVRegisterClass::FPRRC:
      if (ST->hasStdExtF())
        return 32;
      return 0;
    case RISCVRegisterClass::VRRC:
      // Although there are 32 vector registers, v0 is special in that it is the
      // only register that can be used to hold a mask.
      // FIXME: Should we conservatively return 31 as the number of usable
      // vector registers?
      return ST->hasVInstructions() ? 32 : 0;
    }
    llvm_unreachable("unknown register class");
  }

  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const {
    if (Vector)
      return RISCVRegisterClass::VRRC;
    if (!Ty)
      return RISCVRegisterClass::GPRRC;

    Type *ScalarTy = Ty->getScalarType();
    if ((ScalarTy->isHalfTy() && ST->hasStdExtZfhOrZfhmin()) ||
        (ScalarTy->isFloatTy() && ST->hasStdExtF()) ||
        (ScalarTy->isDoubleTy() && ST->hasStdExtD())) {
      return RISCVRegisterClass::FPRRC;
    }

    return RISCVRegisterClass::GPRRC;
  }

  const char *getRegisterClassName(unsigned ClassID) const {
    switch (ClassID) {
    case RISCVRegisterClass::GPRRC:
      return "RISCV::GPRRC";
    case RISCVRegisterClass::FPRRC:
      return "RISCV::FPRRC";
    case RISCVRegisterClass::VRRC:
      return "RISCV::VRRC";
    }
    llvm_unreachable("unknown register class");
  }

  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2);
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H