//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
  cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));
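
// These are ordinary cl::opt flags, so they can be toggled directly on the
// tool command line. An illustrative invocation (exact -mattr features and
// input elided here are assumptions, not taken from this file):
//
//   llc -march=hexagon -mattr=+hvx -hexagon-autohvx foo.ll
//
// or from clang via -mllvm -hexagon-autohvx.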

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;
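
// For example, under this scheme a vector floating-point operation is
// costed at roughly FloatFactor times the element count on top of the
// type-legalization cost; see getArithmeticInstrCost below.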

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}
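
// Illustration: the guard above matches a loop whose exact trip count is
// unknown to SCEV (getSmallConstantTripCount(L) == 0) but is provably at
// most 5, e.g. a hypothetical "for (i = 0; i < n; ++i)" dominated by a
// check establishing n <= 5. Such loops get two iterations peeled.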

bool HexagonTTIImpl::shouldFavorPostInc() const {
  return true;
}

/// --- Vector TTI begin ---

unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 1;
}

unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
  return Vector ? getMinVectorRegisterBitWidth() : 32;
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength() * 8 : 32;
}

unsigned HexagonTTIImpl::getMinimumVF(unsigned ElemWidth) const {
  return (8 * ST.getVectorLength()) / ElemWidth;
}
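
// Worked example, assuming 128-byte HVX vectors (getVectorLength() == 128):
// the vector registers are 128 * 8 = 1024 bits wide, so the minimum VF for
// 16-bit elements is (8 * 128) / 16 = 64 lanes.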

unsigned HexagonTTIImpl::getScalarizationOverhead(VectorType *Ty,
                                                  const APInt &DemandedElts,
                                                  bool Insert, bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

unsigned HexagonTTIImpl::getOperandsScalarizationOverhead(
      ArrayRef<const Value*> Args, unsigned VF) {
  return BaseT::getOperandsScalarizationOverhead(Args, VF);
}

unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
      ArrayRef<Type*> Tys, TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

unsigned
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<int, MVT> LT =
        TLI.getTypeLegalizationCost(DL, ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}
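
// For example, an llvm.bswap.i32 call is costed at LT.first + 2; with i32
// already legal on Hexagon (LT.first == 1), that gives a total cost of 3.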

unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
      ScalarEvolution *SE, const SCEV *S) {
  return 0;
}

unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                         MaybeAlign Alignment,
                                         unsigned AddressSpace,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedSize();
    if (useHVX() && ST.isTypeForHVX(VecTy)) {
      unsigned RegWidth = getRegisterBitWidth(true);
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                CostKind, I);
}
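
// Worked examples for the load costing above, assuming useHVX() and
// 128-byte HVX vectors (RegWidth == 1024):
//  * A <64 x i32> load (VecWidth == 2048) fills whole HVX registers:
//    2048 % 1024 == 0, so the cost is 2048 / 1024 = 2.
//  * A <48 x i16> load (VecWidth == 768) with Align(4) does not:
//    AlignWidth == 32, NumLoads == alignTo(768, 32) / 32 == 24, so the
//    cost is 3 * 24 = 72.
//  * A non-HVX <4 x float> load with Align(4): Cost == FloatFactor == 4,
//    AlignWidth == 32, NumLoads == 4, giving 4 * 4 = 16.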

unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                               Align Alignment,
                                               unsigned AddressSpace,
                                               TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
      int Index, Type *SubTp) {
  return 1;
}

unsigned HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                         CostKind);
}

unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                            Type *CondTy,
                                            CmpInst::Predicate VecPred,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}
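
// For example, an fcmp on <4 x float> is costed as LT.first +
// FloatFactor * 4; integer compares and all selects fall through to the
// default implementation.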

unsigned HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  if (Ty->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}
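
// For example, an fadd on <8 x float> whose legalized type is still
// floating point is costed as LT.first + FloatFactor * 8, making FP
// vector code look expensive to the vectorizer; integer vector
// arithmetic uses the default cost.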

unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                          Type *SrcTy, TTI::CastContextHint CCH,
                                          TTI::TargetCostKind CostKind,
                                          const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
    unsigned Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}
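
// For example, an fptosi from <4 x float> to <4 x i32> counts only the FP
// side: SrcN == 4, DstN == 0, so the cost is
// std::max(SrcLT.first, DstLT.first) + FloatFactor * 4.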

unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
      unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}
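
// For example, inserting an i32 at lane 0 is free, inserting an i32 at
// lane 3 costs 2 (the two rotations), and inserting an i16 at lane 3
// costs 2 + 2 = 4 because of the extra extract.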

bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

int
HexagonTTIImpl::getUserCost(const User *U,
                            ArrayRef<const Value *> Operands,
                            TTI::TargetCostKind CostKind) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32 bits to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands, CostKind);
}
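
// Illustrative IR for the fold above: the sext below is treated as free
// because it can fold into a widening load (e.g. memh on Hexagon):
//
//   %v = load i16, i16* %p
//   %w = sext i16 %v to i32     ; single use of %v => TCC_Free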

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}